	x86/vdso: Use generic VDSO clock mode storage
Switch to the generic VDSO clock mode storage.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com> (VDSO parts)
Acked-by: Juergen Gross <jgross@suse.com> (Xen parts)
Acked-by: Paolo Bonzini <pbonzini@redhat.com> (KVM parts)
Link: https://lkml.kernel.org/r/20200207124403.152039903@linutronix.de
parent 5d51bee725
commit b95a8a27c3

13 changed files with 46 additions and 63 deletions
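For orientation before reading the hunks: the change moves the vDSO clock mode out of the x86-private archdata and into struct clocksource itself, and replaces the VCLOCK_* constants with the generic VDSO_CLOCKMODE_* enumerators. Below is a minimal sketch of the new driver-side pattern, modeled on the clocksource_tsc hunks further down; the driver name, rating, and include paths are illustrative assumptions, not part of this commit.

#include <linux/clocksource.h>
#include <asm/clocksource.h>	/* vclocks_set_used(), VDSO_ARCH_CLOCKMODES */
#include <asm/msr.h>		/* rdtsc_ordered() */

/* Hypothetical example driver, shown only to illustrate the new fields. */
static int example_cs_enable(struct clocksource *cs)
{
	/* Record that this vDSO clock mode has been put into use. */
	vclocks_set_used(VDSO_CLOCKMODE_TSC);
	return 0;
}

static u64 example_cs_read(struct clocksource *cs)
{
	return rdtsc_ordered();
}

static struct clocksource example_clocksource = {
	.name			= "example-tsc",
	.rating			= 250,
	.read			= example_cs_read,
	.mask			= CLOCKSOURCE_MASK(64),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS,
	/* Generic storage; previously .archdata = { .vclock_mode = VCLOCK_TSC } */
	.vdso_clock_mode	= VDSO_CLOCKMODE_TSC,
	.enable			= example_cs_enable,
};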
				
			
@@ -57,7 +57,6 @@ config X86
 	select ACPI_LEGACY_TABLES_LOOKUP	if ACPI
 	select ACPI_SYSTEM_POWER_STATES_SUPPORT	if ACPI
 	select ARCH_32BIT_OFF_T			if X86_32
-	select ARCH_CLOCKSOURCE_DATA
 	select ARCH_CLOCKSOURCE_INIT
 	select ARCH_HAS_ACPI_TABLE_UPGRADE	if ACPI
 	select ARCH_HAS_DEBUG_VIRTUAL
@@ -126,6 +125,7 @@ config X86
 	select GENERIC_STRNLEN_USER
 	select GENERIC_TIME_VSYSCALL
 	select GENERIC_GETTIMEOFDAY
+	select GENERIC_VDSO_CLOCK_MODE
 	select GENERIC_VDSO_TIME_NS
 	select GUP_GET_PTE_LOW_HIGH		if X86_PAE
 	select HARDLOCKUP_CHECK_TIMESTAMP	if X86_64
@@ -221,7 +221,7 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
 	} else if (sym_offset == image->sym_pvclock_page) {
 		struct pvclock_vsyscall_time_info *pvti =
 			pvclock_get_pvti_cpu0_va();
-		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
+		if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK)) {
 			return vmf_insert_pfn_prot(vma, vmf->address,
 					__pa(pvti) >> PAGE_SHIFT,
 					pgprot_decrypted(vma->vm_page_prot));
@@ -229,7 +229,7 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
 	} else if (sym_offset == image->sym_hvclock_page) {
 		struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();
 
-		if (tsc_pg && vclock_was_used(VCLOCK_HVCLOCK))
+		if (tsc_pg && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
 			return vmf_insert_pfn(vma, vmf->address,
 					virt_to_phys(tsc_pg) >> PAGE_SHIFT);
 	} else if (sym_offset == image->sym_timens_page) {
@@ -447,7 +447,7 @@ __setup("vdso=", vdso_setup);
 
 static int __init init_vdso(void)
 {
-	BUILD_BUG_ON(VCLOCK_MAX >= 32);
+	BUILD_BUG_ON(VDSO_CLOCKMODE_MAX >= 32);
 
 	init_vdso_image(&vdso_image_64);
 
@@ -4,15 +4,10 @@
 #ifndef _ASM_X86_CLOCKSOURCE_H
 #define _ASM_X86_CLOCKSOURCE_H
 
-#define VCLOCK_NONE	0	/* No vDSO clock available.		*/
-#define VCLOCK_TSC	1	/* vDSO should use vread_tsc.		*/
-#define VCLOCK_PVCLOCK	2	/* vDSO should use vread_pvclock.	*/
-#define VCLOCK_HVCLOCK	3	/* vDSO should use vread_hvclock.	*/
-#define VCLOCK_MAX	3
-
-struct arch_clocksource_data {
-	int vclock_mode;
-};
+#define VDSO_ARCH_CLOCKMODES	\
+	VDSO_CLOCKMODE_TSC,	\
+	VDSO_CLOCKMODE_PVCLOCK,	\
+	VDSO_CLOCKMODE_HVCLOCK
 
 extern unsigned int vclocks_used;
 
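The VDSO_ARCH_CLOCKMODES list above is what the generic clock mode storage consumes: the architecture enumerates its modes once and the generic code expands them into a shared enum, which is why VDSO_CLOCKMODE_TSC and friends can replace the x86-private VCLOCK_* defines. A sketch of how the generic enum is presumably assembled follows; the header guard, include, and config symbol are assumptions based on this series, not shown in this commit. The BUILD_BUG_ON(VDSO_CLOCKMODE_MAX >= 32) hunk above suggests vclocks_used tracks these modes as a bitmask, hence the 32-mode cap.

/* Presumed shape of the generic enum that consumes VDSO_ARCH_CLOCKMODES. */
#ifndef __VDSO_CLOCKSOURCE_H
#define __VDSO_CLOCKSOURCE_H

#include <asm/clocksource.h>	/* provides VDSO_ARCH_CLOCKMODES on x86 */

enum vdso_clock_mode {
	VDSO_CLOCKMODE_NONE,
#ifdef CONFIG_GENERIC_VDSO_CLOCK_MODE
	VDSO_ARCH_CLOCKMODES,	/* TSC, PVCLOCK, HVCLOCK on x86 */
#endif
	VDSO_CLOCKMODE_MAX,
};

#endif /* __VDSO_CLOCKSOURCE_H */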
@@ -46,9 +46,9 @@ typedef int (*hyperv_fill_flush_list_func)(
 #define hv_set_reference_tsc(val) \
 	wrmsrl(HV_X64_MSR_REFERENCE_TSC, val)
 #define hv_set_clocksource_vdso(val) \
-	((val).archdata.vclock_mode = VCLOCK_HVCLOCK)
+	((val).vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK)
 #define hv_enable_vdso_clocksource() \
-	vclocks_set_used(VCLOCK_HVCLOCK);
+	vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
 #define hv_get_raw_timer() rdtsc_ordered()
 
 void hyperv_callback_vector(void);
@@ -243,7 +243,7 @@ static u64 vread_hvclock(void)
 
 static inline u64 __arch_get_hw_counter(s32 clock_mode)
 {
-	if (likely(clock_mode == VCLOCK_TSC))
+	if (likely(clock_mode == VDSO_CLOCKMODE_TSC))
 		return (u64)rdtsc_ordered();
 	/*
 	 * For any memory-mapped vclock type, we need to make sure that gcc
@@ -252,13 +252,13 @@ static inline u64 __arch_get_hw_counter(s32 clock_mode)
 	 * question isn't enabled, which will segfault.  Hence the barriers.
 	 */
 #ifdef CONFIG_PARAVIRT_CLOCK
-	if (clock_mode == VCLOCK_PVCLOCK) {
+	if (clock_mode == VDSO_CLOCKMODE_PVCLOCK) {
 		barrier();
 		return vread_pvclock();
 	}
 #endif
 #ifdef CONFIG_HYPERV_TIMER
-	if (clock_mode == VCLOCK_HVCLOCK) {
+	if (clock_mode == VDSO_CLOCKMODE_HVCLOCK) {
 		barrier();
 		return vread_hvclock();
 	}
@@ -21,13 +21,6 @@ struct vdso_data *__x86_get_k_vdso_data(void)
 }
 #define __arch_get_k_vdso_data __x86_get_k_vdso_data
 
-static __always_inline
-int __x86_get_clock_mode(struct timekeeper *tk)
-{
-	return tk->tkr_mono.clock->archdata.vclock_mode;
-}
-#define __arch_get_clock_mode __x86_get_clock_mode
-
 /* The asm-generic header needs to be included after the definitions above */
 #include <asm-generic/vdso/vsyscall.h>
 
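With the mode stored in struct clocksource, the __x86_get_clock_mode()/__arch_get_clock_mode hook deleted above is no longer needed: the generic vsyscall update path can read the mode straight from the timekeeper's clocksource. A rough sketch of the presumed generic side follows (body abridged; field and index names are assumptions based on the generic vDSO data layout, not code from this commit).

#include <vdso/datapage.h>
#include <linux/timekeeper_internal.h>

void update_vsyscall(struct timekeeper *tk)
{
	struct vdso_data *vdata = __arch_get_k_vdso_data();
	s32 clock_mode;

	/* No per-arch __arch_get_clock_mode() hook; read the clocksource directly. */
	clock_mode = tk->tkr_mono.clock->vdso_clock_mode;
	vdata[CS_HRES_COARSE].clock_mode = clock_mode;
	vdata[CS_RAW].clock_mode = clock_mode;

	/* ... cycle_last, mask, mult, shift and the basetime arrays follow ... */
}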
@@ -161,7 +161,7 @@ bool kvm_check_and_clear_guest_paused(void)
 
 static int kvm_cs_enable(struct clocksource *cs)
 {
-	vclocks_set_used(VCLOCK_PVCLOCK);
+	vclocks_set_used(VDSO_CLOCKMODE_PVCLOCK);
 	return 0;
 }
 
@@ -279,7 +279,7 @@ static int __init kvm_setup_vsyscall_timeinfo(void)
 	if (!(flags & PVCLOCK_TSC_STABLE_BIT))
 		return 0;
 
-	kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
+	kvm_clock.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK;
 #endif
 
 	kvmclock_init_mem();
@@ -145,7 +145,7 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
 
 void pvclock_set_pvti_cpu0_va(struct pvclock_vsyscall_time_info *pvti)
 {
-	WARN_ON(vclock_was_used(VCLOCK_PVCLOCK));
+	WARN_ON(vclock_was_used(VDSO_CLOCKMODE_PVCLOCK));
 	pvti_cpu0_va = pvti;
 }
 
@@ -122,18 +122,12 @@ void __init time_init(void)
  */
 void clocksource_arch_init(struct clocksource *cs)
 {
-	if (cs->archdata.vclock_mode == VCLOCK_NONE)
+	if (cs->vdso_clock_mode == VDSO_CLOCKMODE_NONE)
 		return;
 
-	if (cs->archdata.vclock_mode > VCLOCK_MAX) {
-		pr_warn("clocksource %s registered with invalid vclock_mode %d. Disabling vclock.\n",
-			cs->name, cs->archdata.vclock_mode);
-		cs->archdata.vclock_mode = VCLOCK_NONE;
-	}
-
 	if (cs->mask != CLOCKSOURCE_MASK(64)) {
-		pr_warn("clocksource %s registered with invalid mask %016llx. Disabling vclock.\n",
+		pr_warn("clocksource %s registered with invalid mask %016llx for VDSO. Disabling VDSO support.\n",
 			cs->name, cs->mask);
-		cs->archdata.vclock_mode = VCLOCK_NONE;
+		cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
 	}
 }
@@ -1110,7 +1110,7 @@ static void tsc_cs_tick_stable(struct clocksource *cs)
 
 static int tsc_cs_enable(struct clocksource *cs)
 {
-	vclocks_set_used(VCLOCK_TSC);
+	vclocks_set_used(VDSO_CLOCKMODE_TSC);
 	return 0;
 }
 
@@ -1124,7 +1124,7 @@ static struct clocksource clocksource_tsc_early = {
 	.mask			= CLOCKSOURCE_MASK(64),
 	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
 				  CLOCK_SOURCE_MUST_VERIFY,
-	.archdata		= { .vclock_mode = VCLOCK_TSC },
+	.vdso_clock_mode	= VDSO_CLOCKMODE_TSC,
 	.enable			= tsc_cs_enable,
 	.resume			= tsc_resume,
 	.mark_unstable		= tsc_cs_mark_unstable,
@@ -1145,7 +1145,7 @@ static struct clocksource clocksource_tsc = {
 	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
 				  CLOCK_SOURCE_VALID_FOR_HRES |
 				  CLOCK_SOURCE_MUST_VERIFY,
-	.archdata		= { .vclock_mode = VCLOCK_TSC },
+	.vdso_clock_mode	= VDSO_CLOCKMODE_TSC,
 	.enable			= tsc_cs_enable,
 	.resume			= tsc_resume,
 	.mark_unstable		= tsc_cs_mark_unstable,
@@ -815,8 +815,8 @@ TRACE_EVENT(kvm_write_tsc_offset,
 #ifdef CONFIG_X86_64
 
 #define host_clocks					\
-	{VCLOCK_NONE, "none"},				\
-	{VCLOCK_TSC,  "tsc"}				\
+	{VDSO_CLOCKMODE_NONE, "none"},			\
+	{VDSO_CLOCKMODE_TSC,  "tsc"}			\
 
 TRACE_EVENT(kvm_update_master_clock,
 	TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
@@ -1631,7 +1631,7 @@ static void update_pvclock_gtod(struct timekeeper *tk)
 	write_seqcount_begin(&vdata->seq);
 
 	/* copy pvclock gtod data */
-	vdata->clock.vclock_mode	= tk->tkr_mono.clock->archdata.vclock_mode;
+	vdata->clock.vclock_mode	= tk->tkr_mono.clock->vdso_clock_mode;
 	vdata->clock.cycle_last		= tk->tkr_mono.cycle_last;
 	vdata->clock.mask		= tk->tkr_mono.mask;
 	vdata->clock.mult		= tk->tkr_mono.mult;
@@ -1639,7 +1639,7 @@ static void update_pvclock_gtod(struct timekeeper *tk)
 	vdata->clock.base_cycles	= tk->tkr_mono.xtime_nsec;
 	vdata->clock.offset		= tk->tkr_mono.base;
 
-	vdata->raw_clock.vclock_mode	= tk->tkr_raw.clock->archdata.vclock_mode;
+	vdata->raw_clock.vclock_mode	= tk->tkr_raw.clock->vdso_clock_mode;
 	vdata->raw_clock.cycle_last	= tk->tkr_raw.cycle_last;
 	vdata->raw_clock.mask		= tk->tkr_raw.mask;
 	vdata->raw_clock.mult		= tk->tkr_raw.mult;
@@ -1840,7 +1840,7 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
 
 static inline int gtod_is_based_on_tsc(int mode)
 {
-	return mode == VCLOCK_TSC || mode == VCLOCK_HVCLOCK;
+	return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
 }
 
 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
@@ -1933,7 +1933,7 @@ static inline bool kvm_check_tsc_unstable(void)
 	 * TSC is marked unstable when we're running on Hyper-V,
 	 * 'TSC page' clocksource is good.
 	 */
-	if (pvclock_gtod_data.clock.vclock_mode == VCLOCK_HVCLOCK)
+	if (pvclock_gtod_data.clock.vclock_mode == VDSO_CLOCKMODE_HVCLOCK)
 		return false;
 #endif
 	return check_tsc_unstable();
@@ -2088,30 +2088,30 @@ static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp,
 	u64 tsc_pg_val;
 
 	switch (clock->vclock_mode) {
-	case VCLOCK_HVCLOCK:
+	case VDSO_CLOCKMODE_HVCLOCK:
 		tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(),
 						  tsc_timestamp);
 		if (tsc_pg_val != U64_MAX) {
 			/* TSC page valid */
-			*mode = VCLOCK_HVCLOCK;
+			*mode = VDSO_CLOCKMODE_HVCLOCK;
 			v = (tsc_pg_val - clock->cycle_last) &
 				clock->mask;
 		} else {
 			/* TSC page invalid */
-			*mode = VCLOCK_NONE;
+			*mode = VDSO_CLOCKMODE_NONE;
 		}
 		break;
-	case VCLOCK_TSC:
-		*mode = VCLOCK_TSC;
+	case VDSO_CLOCKMODE_TSC:
+		*mode = VDSO_CLOCKMODE_TSC;
 		*tsc_timestamp = read_tsc();
 		v = (*tsc_timestamp - clock->cycle_last) &
 			clock->mask;
 		break;
 	default:
-		*mode = VCLOCK_NONE;
+		*mode = VDSO_CLOCKMODE_NONE;
 	}
 
-	if (*mode == VCLOCK_NONE)
+	if (*mode == VDSO_CLOCKMODE_NONE)
 		*tsc_timestamp = v = 0;
 
 	return v * clock->mult;
@@ -147,7 +147,7 @@ static struct notifier_block xen_pvclock_gtod_notifier = {
 
 static int xen_cs_enable(struct clocksource *cs)
 {
-	vclocks_set_used(VCLOCK_PVCLOCK);
+	vclocks_set_used(VDSO_CLOCKMODE_PVCLOCK);
 	return 0;
 }
 
@@ -419,12 +419,13 @@ void xen_restore_time_memory_area(void)
 	ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
 
 	/*
-	 * We don't disable VCLOCK_PVCLOCK entirely if it fails to register the
-	 * secondary time info with Xen or if we migrated to a host without the
-	 * necessary flags. On both of these cases what happens is either
-	 * process seeing a zeroed out pvti or seeing no PVCLOCK_TSC_STABLE_BIT
-	 * bit set. Userspace checks the latter and if 0, it discards the data
-	 * in pvti and fallbacks to a system call for a reliable timestamp.
+	 * We don't disable VDSO_CLOCKMODE_PVCLOCK entirely if it fails to
+	 * register the secondary time info with Xen or if we migrated to a
+	 * host without the necessary flags. On both of these cases what
+	 * happens is either process seeing a zeroed out pvti or seeing no
+	 * PVCLOCK_TSC_STABLE_BIT bit set. Userspace checks the latter and
+	 * if 0, it discards the data in pvti and fallbacks to a system
+	 * call for a reliable timestamp.
 	 */
 	if (ret != 0)
 		pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
@@ -450,7 +451,7 @@ static void xen_setup_vsyscall_time_info(void)
 
 	ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
 	if (ret) {
-		pr_notice("xen: VCLOCK_PVCLOCK not supported (err %d)\n", ret);
+		pr_notice("xen: VDSO_CLOCKMODE_PVCLOCK not supported (err %d)\n", ret);
 		free_page((unsigned long)ti);
 		return;
 	}
@@ -467,14 +468,14 @@ static void xen_setup_vsyscall_time_info(void)
 		if (!ret)
 			free_page((unsigned long)ti);
 
-		pr_notice("xen: VCLOCK_PVCLOCK not supported (tsc unstable)\n");
+		pr_notice("xen: VDSO_CLOCKMODE_PVCLOCK not supported (tsc unstable)\n");
 		return;
 	}
 
 	xen_clock = ti;
 	pvclock_set_pvti_cpu0_va(xen_clock);
 
-	xen_clocksource.archdata.vclock_mode = VCLOCK_PVCLOCK;
+	xen_clocksource.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK;
 }
 
 static void __init xen_time_init(void)