math64, timers: Fix 32bit mul_u64_u32_shr() and friends

It turns out that while GCC-4.4 manages to generate 32x32->64 mult
instructions for the 32bit mul_u64_u32_shr() code, any GCC after that
fails horribly. Fix this by providing an explicit mul_u32_u32() function
which can be architecture provided.

Reported-by: Chris Metcalf <cmetcalf@mellanox.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Chris Metcalf <cmetcalf@mellanox.com> [for tile]
Cc: Christopher S. Hall <christopher.s.hall@intel.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Laurent Vivier <lvivier@redhat.com>
Cc: Liav Rehana <liavr@mellanox.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Parit Bhargava <prarit@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Cochran <richardcochran@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20161209083011.GD15765@worktop.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e96f8f18c8
commit 9e3d6223d2

4 changed files with 43 additions and 9 deletions
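As context for the fix below: mul_u64_u32_shr() is the timekeeping primitive that scales a cycle count by a fixed-point factor, ns = (cycles * mult) >> shift. A minimal userspace stand-in (not the kernel code; the 24 MHz clocksource numbers are made up for illustration, and unsigned __int128 assumes a 64-bit GCC/Clang host):

#include <stdint.h>
#include <stdio.h>

/* reference semantics of mul_u64_u32_shr(): (a * mul) >> shift,
 * computed here with a 128-bit intermediate for clarity */
static inline uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
{
	return (uint64_t)(((unsigned __int128)a * mul) >> shift);
}

int main(void)
{
	/* hypothetical 24 MHz counter: ~41.67 ns/cycle in 24.8 fixed point */
	uint32_t mult = 10667;
	unsigned int shift = 8;

	/* one second's worth of cycles should come out close to 1e9 ns */
	printf("%llu ns\n", (unsigned long long)mul_u64_u32_shr(24000000, mult, shift));
	return 0;
}

On 32-bit targets the kernel cannot assume 128-bit arithmetic, which is why math64.h open-codes a shift/split version; that open-coded path is what the patch routes through the new mul_u32_u32() helper.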
				
			
arch/tile/include/asm/Kbuild
@@ -5,7 +5,6 @@ generic-y += bug.h
 generic-y += bugs.h
 generic-y += clkdev.h
 generic-y += cputime.h
-generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
arch/tile/include/asm/div64.h (new file)
@@ -0,0 +1,14 @@
+#ifndef _ASM_TILE_DIV64_H
+#define _ASM_TILE_DIV64_H
+
+#ifdef __tilegx__
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+	return __insn_mul_lu_lu(a, b);
+}
+#define mul_u32_u32 mul_u32_u32
+#endif
+
+#include <asm-generic/div64.h>
+
+#endif /* _ASM_TILE_DIV64_H */
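A note on the `#define mul_u32_u32 mul_u32_u32` line above: defining the macro to its own name is the kernel's standard override hook, letting the generic header's `#ifndef mul_u32_u32` test (added to include/linux/math64.h below) see that the architecture already supplies the function. A self-contained sketch of the idiom, using a hypothetical helper name:

#include <stdint.h>
#include <stdio.h>

/* "arch" header: supply the function, then mark the name as taken */
static inline uint64_t my_mul(uint32_t a, uint32_t b)
{
	return (uint64_t)a * b;	/* stand-in for __insn_mul_lu_lu() */
}
#define my_mul my_mul

/* "generic" header: compile the fallback only if no arch version exists */
#ifndef my_mul
static inline uint64_t my_mul(uint32_t a, uint32_t b)
{
	return (uint64_t)a * b;
}
#endif

int main(void)
{
	printf("%llu\n", (unsigned long long)my_mul(6, 7));
	return 0;
}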
arch/x86/include/asm/div64.h
@@ -59,6 +59,17 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 }
 #define div_u64_rem	div_u64_rem
 
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+	u32 high, low;
+
+	asm ("mull %[b]" : "=a" (low), "=d" (high)
+	     : [a] "a" (a), [b] "rm" (b) );
+
+	return low | ((u64)high) << 32;
+}
+#define mul_u32_u32 mul_u32_u32
+
 #else
 # include <asm-generic/div64.h>
 #endif /* CONFIG_X86_32 */
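The constraints above pin `a` to EAX and let `b` sit in a register or memory; one-operand MULL then leaves the 64-bit product in EDX:EAX, which the two outputs pick up as low/high halves. The asm can be spot-checked in userspace with something like the following (assumes an x86 toolchain; on a 64-bit distro, `gcc -m32` additionally needs the 32-bit libc):

#include <assert.h>
#include <stdint.h>

static inline uint64_t mul_u32_u32(uint32_t a, uint32_t b)
{
	uint32_t high, low;

	/* MULL: EDX:EAX = EAX * r/m32 */
	asm ("mull %[b]" : "=a" (low), "=d" (high)
	     : [a] "a" (a), [b] "rm" (b));

	return low | ((uint64_t)high) << 32;
}

int main(void)
{
	assert(mul_u32_u32(0xffffffffu, 0xffffffffu) == 0xfffffffe00000001ull);
	assert(mul_u32_u32(0x12345678u, 0x9abcdef0u) ==
	       (uint64_t)0x12345678u * 0x9abcdef0u);
	return 0;
}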
include/linux/math64.h
@@ -133,6 +133,16 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
 	return ret;
 }
 
+#ifndef mul_u32_u32
+/*
+ * Many a GCC version messes this up and generates a 64x64 mult :-(
+ */
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+	return (u64)a * b;
+}
+#endif
+
 #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
 
 #ifndef mul_u64_u32_shr
@@ -160,9 +170,9 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
 	al = a;
 	ah = a >> 32;
 
-	ret = ((u64)al * mul) >> shift;
+	ret = mul_u32_u32(al, mul) >> shift;
 	if (ah)
-		ret += ((u64)ah * mul) << (32 - shift);
+		ret += mul_u32_u32(ah, mul) << (32 - shift);
 
 	return ret;
 }
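Why the two-line split above is exact: writing a = ah*2^32 + al, the high partial product enters the sum as a multiple of 2^32, so for shift <= 32 no carry is lost across the shift boundary, and as long as the full result fits in 64 bits the split equals ((u128)a * mul) >> shift. A userspace spot check of that identity (assumes a compiler with unsigned __int128):

#include <assert.h>
#include <stdint.h>

static uint64_t mul_u32_u32(uint32_t a, uint32_t b)
{
	return (uint64_t)a * b;
}

/* the patched 32-bit fallback, transcribed from the hunk above */
static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
{
	uint32_t ah = a >> 32, al = a;
	uint64_t ret;

	ret = mul_u32_u32(al, mul) >> shift;
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);
	return ret;
}

int main(void)
{
	uint64_t a = 0x0000ffffffffffffull;	/* chosen so the result fits in 64 bits */
	uint32_t mul = 0xdeadbeef;
	unsigned int shift = 20;

	assert(mul_u64_u32_shr(a, mul, shift) ==
	       (uint64_t)(((unsigned __int128)a * mul) >> shift));
	return 0;
}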
@@ -186,10 +196,10 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
 	a0.ll = a;
 	b0.ll = b;
 
-	rl.ll = (u64)a0.l.low * b0.l.low;
-	rm.ll = (u64)a0.l.low * b0.l.high;
-	rn.ll = (u64)a0.l.high * b0.l.low;
-	rh.ll = (u64)a0.l.high * b0.l.high;
+	rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
+	rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
+	rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
+	rh.ll = mul_u32_u32(a0.l.high, b0.l.high);
 
 	/*
 	 * Each of these lines computes a 64-bit intermediate result into "c",
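The four lines replaced above are the schoolbook decomposition of a 64x64->128 multiply into 32x32->64 partial products; the patch only changes how each partial product is computed, not the decomposition. The decomposition itself can be sanity-checked in userspace against __int128 (assumes GCC/Clang on a 64-bit host):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t a = 0x0123456789abcdefull, b = 0xfedcba9876543210ull;
	uint32_t al = a, ah = a >> 32, bl = b, bh = b >> 32;

	uint64_t rl = (uint64_t)al * bl;	/* contributes at bit 0  */
	uint64_t rm = (uint64_t)al * bh;	/* contributes at bit 32 */
	uint64_t rn = (uint64_t)ah * bl;	/* contributes at bit 32 */
	uint64_t rh = (uint64_t)ah * bh;	/* contributes at bit 64 */

	unsigned __int128 r = (unsigned __int128)rl
			    + ((unsigned __int128)rm << 32)
			    + ((unsigned __int128)rn << 32)
			    + ((unsigned __int128)rh << 64);

	assert(r == (unsigned __int128)a * b);
	return 0;
}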
@@ -229,8 +239,8 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
 	} u, rl, rh;
 
 	u.ll = a;
-	rl.ll = (u64)u.l.low * mul;
-	rh.ll = (u64)u.l.high * mul + rl.l.high;
+	rl.ll = mul_u32_u32(u.l.low, mul);
+	rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;
 
 	/* Bits 32-63 of the result will be in rh.l.low. */
 	rl.l.high = do_div(rh.ll, divisor);
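For reference, mul_u64_u32_div() computes (a * mul) / divisor through a 96-bit intermediate, using do_div() for the 64/32 divisions, with the result assumed to fit in 64 bits. Its contract, restated as a hedged userspace reference (assumes __int128; the values are made up):

#include <assert.h>
#include <stdint.h>

/* reference semantics only -- the kernel version avoids 128-bit math */
static uint64_t mul_u64_u32_div_ref(uint64_t a, uint32_t mul, uint32_t divisor)
{
	return (uint64_t)(((unsigned __int128)a * mul) / divisor);
}

int main(void)
{
	/* e.g. rescale 5e9 counter ticks by a ratio of 3/7 */
	assert(mul_u64_u32_div_ref(5000000000ull, 3, 7) == 2142857142ull);
	return 0;
}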