Mirror of https://github.com/torvalds/linux.git, synced 2025-11-04 10:40:15 +02:00.
			
		
		
		
	uaccess: add missing __must_check attributes
The usercopy implementation comments describe that callers of the copy_*_user() family of functions must always have their return values checked. This can be enforced at compile time with __must_check, so add it where needed. Link: http://lkml.kernel.org/r/201908251609.ADAD5CAAC1@keescook Signed-off-by: Kees Cook <keescook@chromium.org> Cc: Alexander Viro <viro@zeniv.linux.org.uk> Cc: Dan Carpenter <dan.carpenter@oracle.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									d5372c3913
								
							
						
					
					
						commit
						9dd819a151
					
				
					 2 changed files with 12 additions and 11 deletions
				
			
		| 
						 | 
				
			
			@ -134,7 +134,7 @@ static inline void copy_overflow(int size, unsigned long count)
 | 
			
		|||
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static __always_inline bool
 | 
			
		||||
static __always_inline __must_check bool
 | 
			
		||||
check_copy_size(const void *addr, size_t bytes, bool is_source)
 | 
			
		||||
{
 | 
			
		||||
	int sz = __compiletime_object_size(addr);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -55,7 +55,7 @@
 | 
			
		|||
 * as usual) and both source and destination can trigger faults.
 | 
			
		||||
 */
 | 
			
		||||
 | 
			
		||||
static __always_inline unsigned long
 | 
			
		||||
static __always_inline __must_check unsigned long
 | 
			
		||||
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 | 
			
		||||
{
 | 
			
		||||
	kasan_check_write(to, n);
 | 
			
		||||
| 
						 | 
				
			
			@ -63,7 +63,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 | 
			
		|||
	return raw_copy_from_user(to, from, n);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static __always_inline unsigned long
 | 
			
		||||
static __always_inline __must_check unsigned long
 | 
			
		||||
__copy_from_user(void *to, const void __user *from, unsigned long n)
 | 
			
		||||
{
 | 
			
		||||
	might_fault();
 | 
			
		||||
| 
						 | 
				
			
			@ -85,7 +85,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 | 
			
		|||
 * The caller should also make sure he pins the user space address
 | 
			
		||||
 * so that we don't result in page fault and sleep.
 | 
			
		||||
 */
 | 
			
		||||
static __always_inline unsigned long
 | 
			
		||||
static __always_inline __must_check unsigned long
 | 
			
		||||
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 | 
			
		||||
{
 | 
			
		||||
	kasan_check_read(from, n);
 | 
			
		||||
| 
						 | 
				
			
			@ -93,7 +93,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 | 
			
		|||
	return raw_copy_to_user(to, from, n);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static __always_inline unsigned long
 | 
			
		||||
static __always_inline __must_check unsigned long
 | 
			
		||||
__copy_to_user(void __user *to, const void *from, unsigned long n)
 | 
			
		||||
{
 | 
			
		||||
	might_fault();
 | 
			
		||||
| 
						 | 
				
			
			@ -103,7 +103,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
 | 
			
		|||
}
 | 
			
		||||
 | 
			
		||||
#ifdef INLINE_COPY_FROM_USER
 | 
			
		||||
static inline unsigned long
 | 
			
		||||
static inline __must_check unsigned long
 | 
			
		||||
_copy_from_user(void *to, const void __user *from, unsigned long n)
 | 
			
		||||
{
 | 
			
		||||
	unsigned long res = n;
 | 
			
		||||
| 
						 | 
				
			
			@ -117,12 +117,12 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
 | 
			
		|||
	return res;
 | 
			
		||||
}
 | 
			
		||||
#else
 | 
			
		||||
extern unsigned long
 | 
			
		||||
extern __must_check unsigned long
 | 
			
		||||
_copy_from_user(void *, const void __user *, unsigned long);
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
#ifdef INLINE_COPY_TO_USER
 | 
			
		||||
static inline unsigned long
 | 
			
		||||
static inline __must_check unsigned long
 | 
			
		||||
_copy_to_user(void __user *to, const void *from, unsigned long n)
 | 
			
		||||
{
 | 
			
		||||
	might_fault();
 | 
			
		||||
| 
						 | 
				
			
			@ -133,7 +133,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n)
 | 
			
		|||
	return n;
 | 
			
		||||
}
 | 
			
		||||
#else
 | 
			
		||||
extern unsigned long
 | 
			
		||||
extern __must_check unsigned long
 | 
			
		||||
_copy_to_user(void __user *, const void *, unsigned long);
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -222,8 +222,9 @@ static inline bool pagefault_disabled(void)
 | 
			
		|||
 | 
			
		||||
#ifndef ARCH_HAS_NOCACHE_UACCESS
 | 
			
		||||
 | 
			
		||||
static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
 | 
			
		||||
				const void __user *from, unsigned long n)
 | 
			
		||||
static inline __must_check unsigned long
 | 
			
		||||
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
 | 
			
		||||
				  unsigned long n)
 | 
			
		||||
{
 | 
			
		||||
	return __copy_from_user_inatomic(to, from, n);
 | 
			
		||||
}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue