forked from mirrors/linux
		
	timekeeping: Let timekeeping_cycles_to_ns() handle both under and overflow
For the case !CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE, forego overflow protection in the range (mask << 1) < delta <= mask, and interpret it always as an inconsistency between CPU clock values. That allows slightly neater code, and it is on a slow path so has no effect on performance.

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240325064023.2997-19-adrian.hunter@intel.com
This commit is contained in:
		
							parent
							
								
									fcf190c369
								
							
						
					
					
						commit
						135225a363
					
				
					 1 changed file with 13 additions and 18 deletions
				
			
		|  | @ -266,17 +266,14 @@ static inline u64 timekeeping_debug_get_ns(const struct tk_read_base *tkr) | ||||||
| 	 * Try to catch underflows by checking if we are seeing small | 	 * Try to catch underflows by checking if we are seeing small | ||||||
| 	 * mask-relative negative values. | 	 * mask-relative negative values. | ||||||
| 	 */ | 	 */ | ||||||
| 	if (unlikely((~delta & mask) < (mask >> 3))) { | 	if (unlikely((~delta & mask) < (mask >> 3))) | ||||||
| 		tk->underflow_seen = 1; | 		tk->underflow_seen = 1; | ||||||
| 		now = last; |  | ||||||
| 	} |  | ||||||
| 
 | 
 | ||||||
| 	/* Cap delta value to the max_cycles values to avoid mult overflows */ | 	/* Check for multiplication overflows */ | ||||||
| 	if (unlikely(delta > max)) { | 	if (unlikely(delta > max)) | ||||||
| 		tk->overflow_seen = 1; | 		tk->overflow_seen = 1; | ||||||
| 		now = last + max; |  | ||||||
| 	} |  | ||||||
| 
 | 
 | ||||||
|  | 	/* timekeeping_cycles_to_ns() handles both under and overflow */ | ||||||
| 	return timekeeping_cycles_to_ns(tkr, now); | 	return timekeeping_cycles_to_ns(tkr, now); | ||||||
| } | } | ||||||
| #else | #else | ||||||
|  | @ -375,19 +372,17 @@ static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 c | ||||||
| 	u64 mask = tkr->mask, delta = (cycles - tkr->cycle_last) & mask; | 	u64 mask = tkr->mask, delta = (cycles - tkr->cycle_last) & mask; | ||||||
| 
 | 
 | ||||||
| 	/*
 | 	/*
 | ||||||
| 	 * This detects the case where the delta overflows the multiplication | 	 * This detects both negative motion and the case where the delta | ||||||
| 	 * with tkr->mult. | 	 * overflows the multiplication with tkr->mult. | ||||||
| 	 */ | 	 */ | ||||||
| 	if (unlikely(delta > tkr->clock->max_cycles)) { | 	if (unlikely(delta > tkr->clock->max_cycles)) { | ||||||
| 		if (IS_ENABLED(CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE)) { | 		/*
 | ||||||
| 			/*
 | 		 * Handle clocksource inconsistency between CPUs to prevent | ||||||
| 			 * Handle clocksource inconsistency between CPUs to prevent | 		 * time from going backwards by checking for the MSB of the | ||||||
| 			 * time from going backwards by checking for the MSB of the | 		 * mask being set in the delta. | ||||||
| 			 * mask being set in the delta. | 		 */ | ||||||
| 			 */ | 		if (delta & ~(mask >> 1)) | ||||||
| 			if (unlikely(delta & ~(mask >> 1))) | 			return tkr->xtime_nsec >> tkr->shift; | ||||||
| 				return tkr->xtime_nsec >> tkr->shift; |  | ||||||
| 		} |  | ||||||
| 
 | 
 | ||||||
| 		return delta_to_ns_safe(tkr, delta); | 		return delta_to_ns_safe(tkr, delta); | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
		Loading…
	
		Reference in a new issue
	
	 Adrian Hunter
						Adrian Hunter