Merge tag 'for-v5.9' of git://git.kernel.org/pub/scm/linux/kernel/git/sre/linux-power-supply

Pull power supply and reset updates from Sebastian Reichel:

 "Power-supply core:
   - add COOL/WARM/HOT state from JEITA JISC8712:2015 specification
   - convert simple-battery DT binding to YAML
   - add long-life charging mode

  Battery/charger drivers:
   - bq25150: new charger driver
   - bq27xxx: add support for BQ27z561 and BQ28z610
   - max17040: support CAPACITY_ALERT_MIN
   - sbs-battery: add PEC support
   - wilco-ec: support long-life charging mode
   - bq25890: fix DT binding
   - misc. fixes and cleanups

  Reset drivers:
   - linkstation: new reset driver"

* tag 'for-v5.9' of git://git.kernel.org/pub/scm/linux/kernel/git/sre/linux-power-supply: (32 commits)
  power: supply: wilco_ec: Add long life charging mode
  power: supply: bq27xxx_battery: Add the BQ28z610 Battery monitor
  dt-bindings: power: Add BQ28z610 compatible
  power: supply: bq27xxx_battery: Add the BQ27Z561 Battery monitor
  dt-bindings: power: Add BQ27Z561 compatible
  power: supply: test_power: Fix battery_current initial value
  power: supply: Fix kerneldoc of power_supply_temp2resist_simple()
  power: supply: cpcap-battery: Fix kerneldoc of cpcap_battery_read_accumulated()
  dt-bindings: power: Convert battery.txt to battery.yaml
  power: supply: rt5033_battery: Fix error code in rt5033_battery_probe()
  power: supply: max17040: Add POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN
  power: supply: check if calc_soc succeeded in pm860x_init_battery
  power: supply: bq2xxxx: Replace HTTP links with HTTPS ones
  power: reset: add driver for LinkStation power off
  power: supply: sc27xx: prevent adc * 1000 from overflow
  math64: New DIV_S64_ROUND_CLOSEST helper
  power: fix duplicated words in bq2415x_charger.h
  power: Convert to DEFINE_SHOW_ATTRIBUTE
  power: reset: keystone-reset: Replace HTTP links with HTTPS ones
  power: supply: bq25150 introduce the bq25150
  ...
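Two of the commits above travelled together: the sc27xx "prevent adc * 1000 from overflow" fix is the kind of caller the new DIV_S64_ROUND_CLOSEST helper (added to include/linux/math64.h, shown below) is meant for. A minimal sketch of the pattern, with a made-up function and scaling factors that are not taken from the sc27xx driver:

#include <linux/math64.h>

/* Hypothetical example: scale a raw ADC reading to microvolts. */
static int adc_raw_to_uv(int raw, int numerator, int denominator)
{
	/* Widen to 64 bits first, so raw * numerator cannot overflow an int. */
	s64 tmp = (s64)raw * numerator;

	/* Signed 64-by-32 divide, rounded to the nearest integer. */
	return (int)DIV_S64_ROUND_CLOSEST(tmp, denominator);
}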
		
			
				
	
	
		
include/linux/math64.h (303 lines, 7.1 KiB, C)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <vdso/math64.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y)   div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 *
 * This is commonly provided by 32bit archs to provide an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 * @remainder: pointer to unsigned 64bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y)   div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

#ifndef mul_u32_u32
/*
 * Many a GCC version messes this up and generates a 64x64 mult :-(
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}
#endif

#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u64_shr */

#else

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 ah, al;
	u64 ret;

	al = a;
	ah = a >> 32;

	ret = mul_u32_u32(al, mul) >> shift;
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);

	return ret;
}
#endif /* mul_u64_u32_shr */

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} rl, rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

	rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
	rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
	rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
	rh.ll = mul_u32_u32(a0.l.high, b0.l.high);

	/*
	 * Each of these lines computes a 64-bit intermediate result into "c",
	 * starting at bits 32-95.  The low 32-bits go into the result of the
	 * multiplication, the high 32-bits are carried into the next step.
	 */
	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
	rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;

	/*
	 * The 128-bit result of the multiplication is in rl.ll and rh.ll,
	 * shift it right and throw away the high part of the result.
	 */
	if (shift == 0)
		return rl.ll;
	if (shift < 64)
		return (rl.ll >> shift) | (rh.ll << (64 - shift));
	return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */

#endif

#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} u, rl, rh;

	u.ll = a;
	rl.ll = mul_u32_u32(u.l.low, mul);
	rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;

	/* Bits 32-63 of the result will be in rh.l.low. */
	rl.l.high = do_div(rh.ll, divisor);

	/* Bits 0-31 of the result will be in rl.l.low.	*/
	do_div(rl.ll, divisor);

	rl.l.high = rh.l.low;
	return rl.ll;
}
#endif /* mul_u64_u32_div */

u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);

#define DIV64_U64_ROUND_UP(ll, d)	\
	({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })

/**
 * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 64bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */
#define DIV64_U64_ROUND_CLOSEST(dividend, divisor)	\
	({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); })

/*
 * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 *
 * Divide signed 64bit dividend by signed 32bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */
#define DIV_S64_ROUND_CLOSEST(dividend, divisor)(	\
{							\
	s64 __x = (dividend);				\
	s32 __d = (divisor);				\
	((__x > 0) == (__d > 0)) ?			\
		div_s64((__x + (__d / 2)), __d) :	\
		div_s64((__x - (__d / 2)), __d);	\
}							\
)

#endif /* _LINUX_MATH64_H */
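For reference, a short usage sketch of a couple of the helpers declared above. The wrapper functions are hypothetical and exist only for illustration; only the math64.h calls are real. These helpers matter because, on many 32-bit architectures, a plain '/' on a u64 would need libgcc's 64-bit division routines, which the kernel does not link in, so callers are expected to go through div_u64()/do_div() and friends instead:

#include <linux/math64.h>

/* Hypothetical wrappers, for illustration only. */

static u64 ns_to_ms(u64 ns, u32 *rem_ns)
{
	/* 64-by-32 divide with remainder; safe on 32-bit kernels. */
	return div_u64_rem(ns, 1000000, rem_ns);
}

static u64 mean_value(u64 sum, u64 count)
{
	/* 64-by-64 divide, rounded to the nearest integer. */
	return DIV64_U64_ROUND_CLOSEST(sum, count);
}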