forked from mirrors/linux
		
	arm64/arch_timer: Provide noinstr sched_clock_read() functions
With the intent to provide local_clock_noinstr(), a variant of local_clock() that's safe to be called from noinstr code (with the assumption that any such code will already be non-preemptible), prepare for things by providing a noinstr sched_clock_read() function. Specifically, preempt_enable_*() calls out to schedule(), which upsets noinstr validation efforts. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Tested-by: Michael Kelley <mikelley@microsoft.com> # Hyper-V Link: https://lore.kernel.org/r/20230519102715.435618812@infradead.org
This commit is contained in: commit 24ee7607b2 (parent c1d26c0f02) — 2 changed files with 41 additions and 21 deletions.
			
		|  | @ -88,13 +88,7 @@ static inline notrace u64 arch_timer_read_cntvct_el0(void) | ||||||
| 
 | 
 | ||||||
| #define arch_timer_reg_read_stable(reg)					\ | #define arch_timer_reg_read_stable(reg)					\ | ||||||
| 	({								\ | 	({								\ | ||||||
| 		u64 _val;						\ | 		erratum_handler(read_ ## reg)();			\ | ||||||
| 									\ |  | ||||||
| 		preempt_disable_notrace();				\ |  | ||||||
| 		_val = erratum_handler(read_ ## reg)();			\ |  | ||||||
| 		preempt_enable_notrace();				\ |  | ||||||
| 									\ |  | ||||||
| 		_val;							\ |  | ||||||
| 	}) | 	}) | ||||||
| 
 | 
 | ||||||
| /*
 | /*
 | ||||||
|  |  | ||||||
|  | @ -191,22 +191,40 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg, | ||||||
| 	return val; | 	return val; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static notrace u64 arch_counter_get_cntpct_stable(void) | static noinstr u64 raw_counter_get_cntpct_stable(void) | ||||||
| { | { | ||||||
| 	return __arch_counter_get_cntpct_stable(); | 	return __arch_counter_get_cntpct_stable(); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static notrace u64 arch_counter_get_cntpct(void) | static notrace u64 arch_counter_get_cntpct_stable(void) | ||||||
|  | { | ||||||
|  | 	u64 val; | ||||||
|  | 	preempt_disable_notrace(); | ||||||
|  | 	val = __arch_counter_get_cntpct_stable(); | ||||||
|  | 	preempt_enable_notrace(); | ||||||
|  | 	return val; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | static noinstr u64 arch_counter_get_cntpct(void) | ||||||
| { | { | ||||||
| 	return __arch_counter_get_cntpct(); | 	return __arch_counter_get_cntpct(); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static notrace u64 arch_counter_get_cntvct_stable(void) | static noinstr u64 raw_counter_get_cntvct_stable(void) | ||||||
| { | { | ||||||
| 	return __arch_counter_get_cntvct_stable(); | 	return __arch_counter_get_cntvct_stable(); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static notrace u64 arch_counter_get_cntvct(void) | static notrace u64 arch_counter_get_cntvct_stable(void) | ||||||
|  | { | ||||||
|  | 	u64 val; | ||||||
|  | 	preempt_disable_notrace(); | ||||||
|  | 	val = __arch_counter_get_cntvct_stable(); | ||||||
|  | 	preempt_enable_notrace(); | ||||||
|  | 	return val; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | static noinstr u64 arch_counter_get_cntvct(void) | ||||||
| { | { | ||||||
| 	return __arch_counter_get_cntvct(); | 	return __arch_counter_get_cntvct(); | ||||||
| } | } | ||||||
|  | @ -753,14 +771,14 @@ static int arch_timer_set_next_event_phys(unsigned long evt, | ||||||
| 	return 0; | 	return 0; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo) | static noinstr u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo) | ||||||
| { | { | ||||||
| 	u32 cnt_lo, cnt_hi, tmp_hi; | 	u32 cnt_lo, cnt_hi, tmp_hi; | ||||||
| 
 | 
 | ||||||
| 	do { | 	do { | ||||||
| 		cnt_hi = readl_relaxed(t->base + offset_lo + 4); | 		cnt_hi = __raw_readl(t->base + offset_lo + 4); | ||||||
| 		cnt_lo = readl_relaxed(t->base + offset_lo); | 		cnt_lo = __raw_readl(t->base + offset_lo); | ||||||
| 		tmp_hi = readl_relaxed(t->base + offset_lo + 4); | 		tmp_hi = __raw_readl(t->base + offset_lo + 4); | ||||||
| 	} while (cnt_hi != tmp_hi); | 	} while (cnt_hi != tmp_hi); | ||||||
| 
 | 
 | ||||||
| 	return ((u64) cnt_hi << 32) | cnt_lo; | 	return ((u64) cnt_hi << 32) | cnt_lo; | ||||||
|  | @ -1060,7 +1078,7 @@ bool arch_timer_evtstrm_available(void) | ||||||
| 	return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available); | 	return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static u64 arch_counter_get_cntvct_mem(void) | static noinstr u64 arch_counter_get_cntvct_mem(void) | ||||||
| { | { | ||||||
| 	return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO); | 	return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO); | ||||||
| } | } | ||||||
|  | @ -1074,6 +1092,7 @@ struct arch_timer_kvm_info *arch_timer_get_kvm_info(void) | ||||||
| 
 | 
 | ||||||
| static void __init arch_counter_register(unsigned type) | static void __init arch_counter_register(unsigned type) | ||||||
| { | { | ||||||
|  | 	u64 (*scr)(void); | ||||||
| 	u64 start_count; | 	u64 start_count; | ||||||
| 	int width; | 	int width; | ||||||
| 
 | 
 | ||||||
|  | @ -1083,21 +1102,28 @@ static void __init arch_counter_register(unsigned type) | ||||||
| 
 | 
 | ||||||
| 		if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) || | 		if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) || | ||||||
| 		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) { | 		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) { | ||||||
| 			if (arch_timer_counter_has_wa()) | 			if (arch_timer_counter_has_wa()) { | ||||||
| 				rd = arch_counter_get_cntvct_stable; | 				rd = arch_counter_get_cntvct_stable; | ||||||
| 			else | 				scr = raw_counter_get_cntvct_stable; | ||||||
|  | 			} else { | ||||||
| 				rd = arch_counter_get_cntvct; | 				rd = arch_counter_get_cntvct; | ||||||
|  | 				scr = arch_counter_get_cntvct; | ||||||
|  | 			} | ||||||
| 		} else { | 		} else { | ||||||
| 			if (arch_timer_counter_has_wa()) | 			if (arch_timer_counter_has_wa()) { | ||||||
| 				rd = arch_counter_get_cntpct_stable; | 				rd = arch_counter_get_cntpct_stable; | ||||||
| 			else | 				scr = raw_counter_get_cntpct_stable; | ||||||
|  | 			} else { | ||||||
| 				rd = arch_counter_get_cntpct; | 				rd = arch_counter_get_cntpct; | ||||||
|  | 				scr = arch_counter_get_cntpct; | ||||||
|  | 			} | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		arch_timer_read_counter = rd; | 		arch_timer_read_counter = rd; | ||||||
| 		clocksource_counter.vdso_clock_mode = vdso_default; | 		clocksource_counter.vdso_clock_mode = vdso_default; | ||||||
| 	} else { | 	} else { | ||||||
| 		arch_timer_read_counter = arch_counter_get_cntvct_mem; | 		arch_timer_read_counter = arch_counter_get_cntvct_mem; | ||||||
|  | 		scr = arch_counter_get_cntvct_mem; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	width = arch_counter_get_width(); | 	width = arch_counter_get_width(); | ||||||
|  | @ -1113,7 +1139,7 @@ static void __init arch_counter_register(unsigned type) | ||||||
| 	timecounter_init(&arch_timer_kvm_info.timecounter, | 	timecounter_init(&arch_timer_kvm_info.timecounter, | ||||||
| 			 &cyclecounter, start_count); | 			 &cyclecounter, start_count); | ||||||
| 
 | 
 | ||||||
| 	sched_clock_register(arch_timer_read_counter, width, arch_timer_rate); | 	sched_clock_register(scr, width, arch_timer_rate); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static void arch_timer_stop(struct clock_event_device *clk) | static void arch_timer_stop(struct clock_event_device *clk) | ||||||
|  |  | ||||||
Author: Peter Zijlstra