Merge branch 'x86-vdso-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 vdso updates from Ingo Molnar:
 "Two main changes:

   - Cleanups, simplifications and CLOCK_TAI support (Thomas Gleixner)

   - Improve code generation (Andy Lutomirski)"

* 'x86-vdso-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/vdso: Rearrange do_hres() to improve code generation
  x86/vdso: Document vgtod_ts better
  x86/vdso: Remove "memory" clobbers in the vDSO syscall fallbacks
  x66/vdso: Add CLOCK_TAI support
  x86/vdso: Move cycle_last handling into the caller
  x86/vdso: Simplify the invalid vclock case
  x86/vdso: Replace the clockid switch case
  x86/vdso: Collapse coarse functions
  x86/vdso: Collapse high resolution functions
  x86/vdso: Introduce and use vgtod_ts
  x86/vdso: Use unsigned int consistently for vsyscall_gtod_data::seq
  x86/vdso: Enforce 64bit clocksource
  x86/time: Implement clocksource_arch_init()
  clocksource: Provide clocksource_arch_init()

commit 034bda1cd5 — 8 changed files with 154 additions and 192 deletions
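Of the list above, CLOCK_TAI support is the main user-visible addition: clock_gettime(CLOCK_TAI, ...) is now handled directly in the vDSO on x86 rather than always taking the syscall path. A minimal userspace sketch to exercise the new path (assumes a kernel with this series and a libc that defines CLOCK_TAI, e.g. glibc 2.21 or later):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* On a patched kernel this is normally serviced by do_hres()
	 * in the vDSO; it still falls back to the real syscall when
	 * no usable vclock is available. */
	if (clock_gettime(CLOCK_TAI, &ts) != 0) {
		perror("clock_gettime(CLOCK_TAI)");
		return 1;
	}
	printf("TAI: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}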
				
			
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -48,6 +48,7 @@ config X86
 	select ACPI_SYSTEM_POWER_STATES_SUPPORT	if ACPI
 	select ANON_INODES
 	select ARCH_CLOCKSOURCE_DATA
+	select ARCH_CLOCKSOURCE_INIT
 	select ARCH_DISCARD_MEMBLOCK
 	select ARCH_HAS_ACPI_TABLE_UPGRADE	if ACPI
 	select ARCH_HAS_DEBUG_VIRTUAL
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -45,21 +45,10 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 	long ret;
 
 	asm ("syscall" : "=a" (ret), "=m" (*ts) :
 	     "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
-	     "memory", "rcx", "r11");
+	     "rcx", "r11");
 	return ret;
 }
 
-notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
-{
-	long ret;
-
-	asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
-	     "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
-	     "memory", "rcx", "r11");
-	return ret;
-}
-
 #else
 
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
@@ -73,22 +62,7 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 		"mov %%edx, %%ebx \n"
 		: "=a" (ret), "=m" (*ts)
 		: "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
-		: "memory", "edx");
-	return ret;
-}
-
-notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
-{
-	long ret;
-
-	asm (
-		"mov %%ebx, %%edx \n"
-		"mov %[tv], %%ebx \n"
-		"call __kernel_vsyscall \n"
-		"mov %%edx, %%ebx \n"
-		: "=a" (ret), "=m" (*tv), "=m" (*tz)
-		: "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
-		: "memory", "edx");
+		: "edx");
 	return ret;
 }
 
@@ -100,12 +74,11 @@ static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void)
 	return (const struct pvclock_vsyscall_time_info *)&pvclock_page;
 }
 
-static notrace u64 vread_pvclock(int *mode)
+static notrace u64 vread_pvclock(void)
 {
 	const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
-	u64 ret;
-	u64 last;
 	u32 version;
+	u64 ret;
 
 	/*
 	 * Note: The kernel and hypervisor must guarantee that cpu ID
@@ -132,175 +105,112 @@ static notrace u64 vread_pvclock(int *mode)
 	do {
 		version = pvclock_read_begin(pvti);
 
-		if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT))) {
-			*mode = VCLOCK_NONE;
-			return 0;
-		}
+		if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT)))
+			return U64_MAX;
 
 		ret = __pvclock_read_cycles(pvti, rdtsc_ordered());
 	} while (pvclock_read_retry(pvti, version));
 
-	/* refer to vread_tsc() comment for rationale */
-	last = gtod->cycle_last;
-
-	if (likely(ret >= last))
-		return ret;
-
-	return last;
+	return ret;
 }
 #endif
 #ifdef CONFIG_HYPERV_TSCPAGE
-static notrace u64 vread_hvclock(int *mode)
+static notrace u64 vread_hvclock(void)
 {
 	const struct ms_hyperv_tsc_page *tsc_pg =
 		(const struct ms_hyperv_tsc_page *)&hvclock_page;
-	u64 current_tick = hv_read_tsc_page(tsc_pg);
-
-	if (current_tick != U64_MAX)
-		return current_tick;
 
-	*mode = VCLOCK_NONE;
-	return 0;
+	return hv_read_tsc_page(tsc_pg);
 }
 #endif
 
-notrace static u64 vread_tsc(void)
+notrace static inline u64 vgetcyc(int mode)
 {
-	u64 ret = (u64)rdtsc_ordered();
-	u64 last = gtod->cycle_last;
-
-	if (likely(ret >= last))
-		return ret;
-
-	/*
-	 * GCC likes to generate cmov here, but this branch is extremely
-	 * predictable (it's just a function of time and the likely is
-	 * very likely) and there's a data dependence, so force GCC
-	 * to generate a branch instead.  I don't barrier() because
-	 * we don't actually need a barrier, and if this function
-	 * ever gets inlined it will generate worse code.
-	 */
-	asm volatile ("");
-	return last;
-}
-
-notrace static inline u64 vgetsns(int *mode)
-{
-	u64 v;
-	cycles_t cycles;
-
-	if (gtod->vclock_mode == VCLOCK_TSC)
-		cycles = vread_tsc();
+	if (mode == VCLOCK_TSC)
+		return (u64)rdtsc_ordered();
 #ifdef CONFIG_PARAVIRT_CLOCK
-	else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
-		cycles = vread_pvclock(mode);
+	else if (mode == VCLOCK_PVCLOCK)
+		return vread_pvclock();
 #endif
 #ifdef CONFIG_HYPERV_TSCPAGE
-	else if (gtod->vclock_mode == VCLOCK_HVCLOCK)
-		cycles = vread_hvclock(mode);
+	else if (mode == VCLOCK_HVCLOCK)
+		return vread_hvclock();
 #endif
-	else
-		return 0;
-
-	v = (cycles - gtod->cycle_last) & gtod->mask;
-	return v * gtod->mult;
-}
-
-/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
-notrace static int __always_inline do_realtime(struct timespec *ts)
-{
-	unsigned long seq;
-	u64 ns;
-	int mode;
-
-	do {
-		seq = gtod_read_begin(gtod);
-		mode = gtod->vclock_mode;
-		ts->tv_sec = gtod->wall_time_sec;
-		ns = gtod->wall_time_snsec;
-		ns += vgetsns(&mode);
-		ns >>= gtod->shift;
-	} while (unlikely(gtod_read_retry(gtod, seq)));
-
-	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
-	ts->tv_nsec = ns;
-
-	return mode;
-}
-
-notrace static int __always_inline do_monotonic(struct timespec *ts)
-{
-	unsigned long seq;
-	u64 ns;
-	int mode;
-
-	do {
-		seq = gtod_read_begin(gtod);
-		mode = gtod->vclock_mode;
-		ts->tv_sec = gtod->monotonic_time_sec;
-		ns = gtod->monotonic_time_snsec;
-		ns += vgetsns(&mode);
-		ns >>= gtod->shift;
-	} while (unlikely(gtod_read_retry(gtod, seq)));
-
-	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
-	ts->tv_nsec = ns;
-
-	return mode;
-}
-
-notrace static void do_realtime_coarse(struct timespec *ts)
-{
-	unsigned long seq;
-	do {
-		seq = gtod_read_begin(gtod);
-		ts->tv_sec = gtod->wall_time_coarse_sec;
-		ts->tv_nsec = gtod->wall_time_coarse_nsec;
-	} while (unlikely(gtod_read_retry(gtod, seq)));
-}
-
-notrace static void do_monotonic_coarse(struct timespec *ts)
-{
-	unsigned long seq;
-	do {
-		seq = gtod_read_begin(gtod);
-		ts->tv_sec = gtod->monotonic_time_coarse_sec;
-		ts->tv_nsec = gtod->monotonic_time_coarse_nsec;
-	} while (unlikely(gtod_read_retry(gtod, seq)));
-}
-
-notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
-{
-	switch (clock) {
-	case CLOCK_REALTIME:
-		if (do_realtime(ts) == VCLOCK_NONE)
-			goto fallback;
-		break;
-	case CLOCK_MONOTONIC:
-		if (do_monotonic(ts) == VCLOCK_NONE)
-			goto fallback;
-		break;
-	case CLOCK_REALTIME_COARSE:
-		do_realtime_coarse(ts);
-		break;
-	case CLOCK_MONOTONIC_COARSE:
-		do_monotonic_coarse(ts);
-		break;
-	default:
-		goto fallback;
-	}
-
+	return U64_MAX;
+}
+
+notrace static int do_hres(clockid_t clk, struct timespec *ts)
+{
+	struct vgtod_ts *base = &gtod->basetime[clk];
+	u64 cycles, last, sec, ns;
+	unsigned int seq;
+
+	do {
+		seq = gtod_read_begin(gtod);
+		cycles = vgetcyc(gtod->vclock_mode);
+		ns = base->nsec;
+		last = gtod->cycle_last;
+		if (unlikely((s64)cycles < 0))
+			return vdso_fallback_gettime(clk, ts);
+		if (cycles > last)
+			ns += (cycles - last) * gtod->mult;
+		ns >>= gtod->shift;
+		sec = base->sec;
+	} while (unlikely(gtod_read_retry(gtod, seq)));
+
+	/*
+	 * Do this outside the loop: a race inside the loop could result
+	 * in __iter_div_u64_rem() being extremely slow.
+	 */
+	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+	ts->tv_nsec = ns;
+
 	return 0;
-fallback:
-	return vdso_fallback_gettime(clock, ts);
 }
 
+notrace static void do_coarse(clockid_t clk, struct timespec *ts)
+{
+	struct vgtod_ts *base = &gtod->basetime[clk];
+	unsigned int seq;
+
+	do {
+		seq = gtod_read_begin(gtod);
+		ts->tv_sec = base->sec;
+		ts->tv_nsec = base->nsec;
+	} while (unlikely(gtod_read_retry(gtod, seq)));
+}
+
+notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
+{
+	unsigned int msk;
+
+	/* Sort out negative (CPU/FD) and invalid clocks */
+	if (unlikely((unsigned int) clock >= MAX_CLOCKS))
+		return vdso_fallback_gettime(clock, ts);
+
+	/*
+	 * Convert the clockid to a bitmask and use it to check which
+	 * clocks are handled in the VDSO directly.
+	 */
+	msk = 1U << clock;
+	if (likely(msk & VGTOD_HRES)) {
+		return do_hres(clock, ts);
+	} else if (msk & VGTOD_COARSE) {
+		do_coarse(clock, ts);
+		return 0;
+	}
+	return vdso_fallback_gettime(clock, ts);
+}
+
 int clock_gettime(clockid_t, struct timespec *)
 	__attribute__((weak, alias("__vdso_clock_gettime")));
 
 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 {
 	if (likely(tv != NULL)) {
-		if (unlikely(do_realtime((struct timespec *)tv) == VCLOCK_NONE))
-			return vdso_fallback_gtod(tv, tz);
+		struct timespec *ts = (struct timespec *) tv;
+
+		do_hres(CLOCK_REALTIME, ts);
 		tv->tv_usec /= 1000;
 	}
 	if (unlikely(tz != NULL)) {
@@ -320,7 +230,7 @@ int gettimeofday(struct timeval *, struct timezone *)
 notrace time_t __vdso_time(time_t *t)
 {
 	/* This is atomic on x86 so we don't need any locks. */
-	time_t result = READ_ONCE(gtod->wall_time_sec);
+	time_t result = READ_ONCE(gtod->basetime[CLOCK_REALTIME].sec);
 
 	if (t)
 		*t = result;
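The clockid switch in __vdso_clock_gettime() is replaced by a bitmask test: one shift classifies the clock as high-resolution, coarse, or fallback. A standalone sketch of that dispatch (userspace C; the clockid constants mirror the Linux UAPI values, but none of this is kernel code):

#include <stdio.h>

#define BIT(n)	(1U << (n))

/* clockid values as in the Linux UAPI; MAX_CLOCKS is 16 there too */
enum { CLK_REALTIME = 0, CLK_MONOTONIC = 1, CLK_REALTIME_COARSE = 5,
       CLK_MONOTONIC_COARSE = 6, CLK_TAI = 11, MAX_CLKS = 16 };

#define HRES_MASK   (BIT(CLK_REALTIME) | BIT(CLK_MONOTONIC) | BIT(CLK_TAI))
#define COARSE_MASK (BIT(CLK_REALTIME_COARSE) | BIT(CLK_MONOTONIC_COARSE))

static const char *classify(int clock)
{
	unsigned int msk;

	/* Negative (CPU/FD) and out-of-range ids are caught in one test */
	if ((unsigned int)clock >= MAX_CLKS)
		return "fallback";

	msk = 1U << clock;
	if (msk & HRES_MASK)
		return "hres";
	if (msk & COARSE_MASK)
		return "coarse";
	return "fallback";
}

int main(void)
{
	printf("%s %s %s\n", classify(CLK_TAI),
	       classify(CLK_MONOTONIC_COARSE), classify(-1));
	return 0;
}

This prints "hres coarse fallback": the common high-resolution case is decided with a single shift and mask instead of a jump table.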
diff --git a/arch/x86/entry/vsyscall/vsyscall_gtod.c b/arch/x86/entry/vsyscall/vsyscall_gtod.c
--- a/arch/x86/entry/vsyscall/vsyscall_gtod.c
+++ b/arch/x86/entry/vsyscall/vsyscall_gtod.c
@@ -31,6 +31,8 @@ void update_vsyscall(struct timekeeper *tk)
 {
 	int vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
 	struct vsyscall_gtod_data *vdata = &vsyscall_gtod_data;
+	struct vgtod_ts *base;
+	u64 nsec;
 
 	/* Mark the new vclock used. */
 	BUILD_BUG_ON(VCLOCK_MAX >= 32);
@@ -45,34 +47,37 @@ void update_vsyscall(struct timekeeper *tk)
 	vdata->mult		= tk->tkr_mono.mult;
 	vdata->shift		= tk->tkr_mono.shift;
 
-	vdata->wall_time_sec		= tk->xtime_sec;
-	vdata->wall_time_snsec		= tk->tkr_mono.xtime_nsec;
+	base = &vdata->basetime[CLOCK_REALTIME];
+	base->sec = tk->xtime_sec;
+	base->nsec = tk->tkr_mono.xtime_nsec;
 
-	vdata->monotonic_time_sec	= tk->xtime_sec
-					+ tk->wall_to_monotonic.tv_sec;
-	vdata->monotonic_time_snsec	= tk->tkr_mono.xtime_nsec
-					+ ((u64)tk->wall_to_monotonic.tv_nsec
-						<< tk->tkr_mono.shift);
-	while (vdata->monotonic_time_snsec >=
-					(((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
-		vdata->monotonic_time_snsec -=
-					((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
-		vdata->monotonic_time_sec++;
+	base = &vdata->basetime[CLOCK_TAI];
+	base->sec = tk->xtime_sec + (s64)tk->tai_offset;
+	base->nsec = tk->tkr_mono.xtime_nsec;
+
+	base = &vdata->basetime[CLOCK_MONOTONIC];
+	base->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+	nsec = tk->tkr_mono.xtime_nsec;
+	nsec +=	((u64)tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
+	while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
+		nsec -= ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
+		base->sec++;
 	}
+	base->nsec = nsec;
 
-	vdata->wall_time_coarse_sec	= tk->xtime_sec;
-	vdata->wall_time_coarse_nsec	= (long)(tk->tkr_mono.xtime_nsec >>
-						 tk->tkr_mono.shift);
+	base = &vdata->basetime[CLOCK_REALTIME_COARSE];
+	base->sec = tk->xtime_sec;
+	base->nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
 
-	vdata->monotonic_time_coarse_sec =
-		vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
-	vdata->monotonic_time_coarse_nsec =
-		vdata->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
-
-	while (vdata->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
-		vdata->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
-		vdata->monotonic_time_coarse_sec++;
+	base = &vdata->basetime[CLOCK_MONOTONIC_COARSE];
+	base->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+	nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
+	nsec += tk->wall_to_monotonic.tv_nsec;
+	while (nsec >= NSEC_PER_SEC) {
+		nsec -= NSEC_PER_SEC;
+		base->sec++;
 	}
+	base->nsec = nsec;
 
 	gtod_write_end(vdata);
 }
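In the CLOCK_MONOTONIC block above, tkr_mono.xtime_nsec is kept left-shifted by the clocksource shift, so the carry loop compares against NSEC_PER_SEC << shift rather than NSEC_PER_SEC. A toy demonstration of that normalization (plain C with invented sec/shift values; not kernel code):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint32_t shift = 8;		/* example clocksource shift */
	uint64_t sec   = 1000;		/* example seconds value */
	/* 3.25 seconds worth of shifted nanoseconds */
	uint64_t nsec  = (3ULL * NSEC_PER_SEC + NSEC_PER_SEC / 4) << shift;

	/* Same carry loop as update_vsyscall() */
	while (nsec >= (NSEC_PER_SEC << shift)) {
		nsec -= NSEC_PER_SEC << shift;
		sec++;
	}
	/* Prints sec=1003; nsec holds 0.25s, still shifted by 8 */
	printf("sec=%llu nsec=%llu\n",
	       (unsigned long long)sec, (unsigned long long)nsec);
	return 0;
}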
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -5,17 +5,38 @@
 #include <linux/compiler.h>
 #include <linux/clocksource.h>
 
+#include <uapi/linux/time.h>
+
 #ifdef BUILD_VDSO32_64
 typedef u64 gtod_long_t;
 #else
 typedef unsigned long gtod_long_t;
 #endif
 
+/*
+ * There is one of these objects in the vvar page for each
+ * vDSO-accelerated clockid.  For high-resolution clocks, this encodes
+ * the time corresponding to vsyscall_gtod_data.cycle_last.  For coarse
+ * clocks, this encodes the actual time.
+ *
+ * To confuse the reader, for high-resolution clocks, nsec is left-shifted
+ * by vsyscall_gtod_data.shift.
+ */
+struct vgtod_ts {
+	u64		sec;
+	u64		nsec;
+};
+
+#define VGTOD_BASES	(CLOCK_TAI + 1)
+#define VGTOD_HRES	(BIT(CLOCK_REALTIME) | BIT(CLOCK_MONOTONIC) | BIT(CLOCK_TAI))
+#define VGTOD_COARSE	(BIT(CLOCK_REALTIME_COARSE) | BIT(CLOCK_MONOTONIC_COARSE))
+
 /*
  * vsyscall_gtod_data will be accessed by 32 and 64 bit code at the same time
  * so be carefull by modifying this structure.
  */
 struct vsyscall_gtod_data {
-	unsigned seq;
+	unsigned int	seq;
 
 	int		vclock_mode;
 	u64		cycle_last;
@@ -23,15 +44,7 @@ struct vsyscall_gtod_data {
 	u32		mult;
 	u32		shift;
 
-	/* open coded 'struct timespec' */
-	u64		wall_time_snsec;
-	gtod_long_t	wall_time_sec;
-	gtod_long_t	monotonic_time_sec;
-	u64		monotonic_time_snsec;
-	gtod_long_t	wall_time_coarse_sec;
-	gtod_long_t	wall_time_coarse_nsec;
-	gtod_long_t	monotonic_time_coarse_sec;
-	gtod_long_t	monotonic_time_coarse_nsec;
+	struct vgtod_ts	basetime[VGTOD_BASES];
 
 	int		tz_minuteswest;
 	int		tz_dsttime;
@@ -44,9 +57,9 @@ static inline bool vclock_was_used(int vclock)
 	return READ_ONCE(vclocks_used) & (1 << vclock);
 }
 
-static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
+static inline unsigned int gtod_read_begin(const struct vsyscall_gtod_data *s)
 {
-	unsigned ret;
+	unsigned int ret;
 
 repeat:
 	ret = READ_ONCE(s->seq);
@@ -59,7 +72,7 @@ static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
 }
 
 static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
-					unsigned start)
+				  unsigned int start)
 {
 	smp_rmb();
 	return unlikely(s->seq != start);
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -10,6 +10,7 @@
  *
  */
 
+#include <linux/clocksource.h>
 #include <linux/clockchips.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
@@ -105,3 +106,24 @@ void __init time_init(void)
 {
 	late_time_init = x86_late_time_init;
 }
+
+/*
+ * Sanity check the vdso related archdata content.
+ */
+void clocksource_arch_init(struct clocksource *cs)
+{
+	if (cs->archdata.vclock_mode == VCLOCK_NONE)
+		return;
+
+	if (cs->archdata.vclock_mode > VCLOCK_MAX) {
+		pr_warn("clocksource %s registered with invalid vclock_mode %d. Disabling vclock.\n",
+			cs->name, cs->archdata.vclock_mode);
+		cs->archdata.vclock_mode = VCLOCK_NONE;
+	}
+
+	if (cs->mask != CLOCKSOURCE_MASK(64)) {
+		pr_warn("clocksource %s registered with invalid mask %016llx. Disabling vclock.\n",
+			cs->name, cs->mask);
+		cs->archdata.vclock_mode = VCLOCK_NONE;
+	}
+}
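Because clocksource_arch_init() runs at registration time, a clocksource that advertises a vclock but violates the new rules is demoted before the vDSO can ever select it. A hypothetical x86 example (all names and values invented for illustration): a driver claiming VCLOCK_TSC with only a 32-bit mask would trip the second check and lose its vclock:

#include <linux/clocksource.h>

static u64 demo_read(struct clocksource *cs)
{
	return 0;	/* placeholder hardware counter read */
}

static struct clocksource demo_cs = {
	.name		= "demo",
	.rating		= 100,
	.read		= demo_read,
	.mask		= CLOCKSOURCE_MASK(32),	/* not 64 bit */
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.archdata	= { .vclock_mode = VCLOCK_TSC },
};

/*
 * clocksource_register_hz(&demo_cs, 1000000) would now warn:
 *   clocksource demo registered with invalid mask 00000000ffffffff.
 *   Disabling vclock.
 * and force demo_cs.archdata.vclock_mode back to VCLOCK_NONE.
 */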
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -241,6 +241,11 @@ static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz)
 	__clocksource_update_freq_scale(cs, 1000, khz);
 }
 
+#ifdef CONFIG_ARCH_CLOCKSOURCE_INIT
+extern void clocksource_arch_init(struct clocksource *cs);
+#else
+static inline void clocksource_arch_init(struct clocksource *cs) { }
+#endif
+
 extern int timekeeping_notify(struct clocksource *clock);
 
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -12,6 +12,10 @@ config CLOCKSOURCE_WATCHDOG
 config ARCH_CLOCKSOURCE_DATA
 	bool
 
+# Architecture has extra clocksource init called from registration
+config ARCH_CLOCKSOURCE_INIT
+	bool
+
 # Clocksources require validation of the clocksource against the last
 # cycle update - x86/TSC misfeature
 config CLOCKSOURCE_VALIDATE_LAST_CYCLE
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -937,6 +937,8 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
 	unsigned long flags;
 
+	clocksource_arch_init(cs);
+
 	/* Initialize mult/shift and max_idle_ns */
 	__clocksource_update_freq_scale(cs, scale, freq);
 