forked from mirrors/linux
		
	slab: add __alloc_size attributes for better bounds checking
As already done in GrapheneOS, add the __alloc_size attribute for regular kmalloc interfaces, to provide additional hinting for better bounds checking, assisting CONFIG_FORTIFY_SOURCE and other compiler optimizations.

Link: https://lkml.kernel.org/r/20210930222704.2631604-5-keescook@chromium.org
Signed-off-by: Kees Cook <keescook@chromium.org>
Co-developed-by: Daniel Micay <danielmicay@gmail.com>
Signed-off-by: Daniel Micay <danielmicay@gmail.com>
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andy Whitcroft <apw@canonical.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Dwaipayan Ray <dwaipayanray1@gmail.com>
Cc: Joe Perches <joe@perches.com>
Cc: Lukas Bulwahn <lukas.bulwahn@gmail.com>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Nathan Chancellor <nathan@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Alexandre Bounine <alex.bou9@gmail.com>
Cc: Gustavo A. R. Silva <gustavoars@kernel.org>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jing Xiangfeng <jingxiangfeng@huawei.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: kernel test robot <lkp@intel.com>
Cc: Matt Porter <mporter@kernel.crashing.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									72d67229f5
								
							
						
					
					
						commit
						c37495d625
					
				
					 1 changed file with 33 additions and 28 deletions
				
			
		|  | @ -181,7 +181,7 @@ int kmem_cache_shrink(struct kmem_cache *s); | ||||||
| /*
 | /*
 | ||||||
|  * Common kmalloc functions provided by all allocators |  * Common kmalloc functions provided by all allocators | ||||||
|  */ |  */ | ||||||
| void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags); | void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2); | ||||||
| void kfree(const void *objp); | void kfree(const void *objp); | ||||||
| void kfree_sensitive(const void *objp); | void kfree_sensitive(const void *objp); | ||||||
| size_t __ksize(const void *objp); | size_t __ksize(const void *objp); | ||||||
|  | @ -425,7 +425,7 @@ static __always_inline unsigned int __kmalloc_index(size_t size, | ||||||
| #define kmalloc_index(s) __kmalloc_index(s, true) | #define kmalloc_index(s) __kmalloc_index(s, true) | ||||||
| #endif /* !CONFIG_SLOB */ | #endif /* !CONFIG_SLOB */ | ||||||
| 
 | 
 | ||||||
| void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc; | void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1); | ||||||
| void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc; | void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc; | ||||||
| void kmem_cache_free(struct kmem_cache *s, void *objp); | void kmem_cache_free(struct kmem_cache *s, void *objp); | ||||||
| 
 | 
 | ||||||
|  | @ -449,11 +449,12 @@ static __always_inline void kfree_bulk(size_t size, void **p) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| #ifdef CONFIG_NUMA | #ifdef CONFIG_NUMA | ||||||
| void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc; | void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment | ||||||
|  | 							 __alloc_size(1); | ||||||
| void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment | void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment | ||||||
| 									 __malloc; | 									 __malloc; | ||||||
| #else | #else | ||||||
| static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) | static __always_inline __alloc_size(1) void *__kmalloc_node(size_t size, gfp_t flags, int node) | ||||||
| { | { | ||||||
| 	return __kmalloc(size, flags); | 	return __kmalloc(size, flags); | ||||||
| } | } | ||||||
|  | @ -466,23 +467,23 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f | ||||||
| 
 | 
 | ||||||
| #ifdef CONFIG_TRACING | #ifdef CONFIG_TRACING | ||||||
| extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size) | extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size) | ||||||
| 				   __assume_slab_alignment __malloc; | 				   __assume_slab_alignment __alloc_size(3); | ||||||
| 
 | 
 | ||||||
| #ifdef CONFIG_NUMA | #ifdef CONFIG_NUMA | ||||||
| extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, | extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, | ||||||
| 					 int node, size_t size) __assume_slab_alignment __malloc; | 					 int node, size_t size) __assume_slab_alignment | ||||||
|  | 								__alloc_size(4); | ||||||
| #else | #else | ||||||
| static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, | static __always_inline __alloc_size(4) void *kmem_cache_alloc_node_trace(struct kmem_cache *s, | ||||||
| 							 gfp_t gfpflags, int node, | 						 gfp_t gfpflags, int node, size_t size) | ||||||
| 							 size_t size) |  | ||||||
| { | { | ||||||
| 	return kmem_cache_alloc_trace(s, gfpflags, size); | 	return kmem_cache_alloc_trace(s, gfpflags, size); | ||||||
| } | } | ||||||
| #endif /* CONFIG_NUMA */ | #endif /* CONFIG_NUMA */ | ||||||
| 
 | 
 | ||||||
| #else /* CONFIG_TRACING */ | #else /* CONFIG_TRACING */ | ||||||
| static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, | static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s, | ||||||
| 						    size_t size) | 								    gfp_t flags, size_t size) | ||||||
| { | { | ||||||
| 	void *ret = kmem_cache_alloc(s, flags); | 	void *ret = kmem_cache_alloc(s, flags); | ||||||
| 
 | 
 | ||||||
|  | @ -501,19 +502,20 @@ static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, g | ||||||
| #endif /* CONFIG_TRACING */ | #endif /* CONFIG_TRACING */ | ||||||
| 
 | 
 | ||||||
| extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment | extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment | ||||||
| 									 __malloc; | 									 __alloc_size(1); | ||||||
| 
 | 
 | ||||||
| #ifdef CONFIG_TRACING | #ifdef CONFIG_TRACING | ||||||
| extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) | extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) | ||||||
| 				__assume_page_alignment __malloc; | 				__assume_page_alignment __alloc_size(1); | ||||||
| #else | #else | ||||||
| static __always_inline void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) | static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags, | ||||||
|  | 								 unsigned int order) | ||||||
| { | { | ||||||
| 	return kmalloc_order(size, flags, order); | 	return kmalloc_order(size, flags, order); | ||||||
| } | } | ||||||
| #endif | #endif | ||||||
| 
 | 
 | ||||||
| static __always_inline void *kmalloc_large(size_t size, gfp_t flags) | static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags) | ||||||
| { | { | ||||||
| 	unsigned int order = get_order(size); | 	unsigned int order = get_order(size); | ||||||
| 	return kmalloc_order_trace(size, flags, order); | 	return kmalloc_order_trace(size, flags, order); | ||||||
|  | @ -573,7 +575,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags) | ||||||
|  *	Try really hard to succeed the allocation but fail |  *	Try really hard to succeed the allocation but fail | ||||||
|  *	eventually. |  *	eventually. | ||||||
|  */ |  */ | ||||||
| static __always_inline void *kmalloc(size_t size, gfp_t flags) | static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags) | ||||||
| { | { | ||||||
| 	if (__builtin_constant_p(size)) { | 	if (__builtin_constant_p(size)) { | ||||||
| #ifndef CONFIG_SLOB | #ifndef CONFIG_SLOB | ||||||
|  | @ -595,7 +597,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) | ||||||
| 	return __kmalloc(size, flags); | 	return __kmalloc(size, flags); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node) | ||||||
| { | { | ||||||
| #ifndef CONFIG_SLOB | #ifndef CONFIG_SLOB | ||||||
| 	if (__builtin_constant_p(size) && | 	if (__builtin_constant_p(size) && | ||||||
|  | @ -619,7 +621,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | ||||||
|  * @size: element size. |  * @size: element size. | ||||||
|  * @flags: the type of memory to allocate (see kmalloc). |  * @flags: the type of memory to allocate (see kmalloc). | ||||||
|  */ |  */ | ||||||
| static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) | static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags) | ||||||
| { | { | ||||||
| 	size_t bytes; | 	size_t bytes; | ||||||
| 
 | 
 | ||||||
|  | @ -637,8 +639,10 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) | ||||||
|  * @new_size: new size of a single member of the array |  * @new_size: new size of a single member of the array | ||||||
|  * @flags: the type of memory to allocate (see kmalloc) |  * @flags: the type of memory to allocate (see kmalloc) | ||||||
|  */ |  */ | ||||||
| static inline void * __must_check krealloc_array(void *p, size_t new_n, size_t new_size, | static inline __alloc_size(2, 3) void * __must_check krealloc_array(void *p, | ||||||
| 						 gfp_t flags) | 								    size_t new_n, | ||||||
|  | 								    size_t new_size, | ||||||
|  | 								    gfp_t flags) | ||||||
| { | { | ||||||
| 	size_t bytes; | 	size_t bytes; | ||||||
| 
 | 
 | ||||||
|  | @ -654,7 +658,7 @@ static inline void * __must_check krealloc_array(void *p, size_t new_n, size_t n | ||||||
|  * @size: element size. |  * @size: element size. | ||||||
|  * @flags: the type of memory to allocate (see kmalloc). |  * @flags: the type of memory to allocate (see kmalloc). | ||||||
|  */ |  */ | ||||||
| static inline void *kcalloc(size_t n, size_t size, gfp_t flags) | static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags) | ||||||
| { | { | ||||||
| 	return kmalloc_array(n, size, flags | __GFP_ZERO); | 	return kmalloc_array(n, size, flags | __GFP_ZERO); | ||||||
| } | } | ||||||
|  | @ -667,12 +671,13 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) | ||||||
|  * allocator where we care about the real place the memory allocation |  * allocator where we care about the real place the memory allocation | ||||||
|  * request comes from. |  * request comes from. | ||||||
|  */ |  */ | ||||||
| extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller); | extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) | ||||||
|  | 				   __alloc_size(1); | ||||||
| #define kmalloc_track_caller(size, flags) \ | #define kmalloc_track_caller(size, flags) \ | ||||||
| 	__kmalloc_track_caller(size, flags, _RET_IP_) | 	__kmalloc_track_caller(size, flags, _RET_IP_) | ||||||
| 
 | 
 | ||||||
| static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags, | static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags, | ||||||
| 				       int node) | 							  int node) | ||||||
| { | { | ||||||
| 	size_t bytes; | 	size_t bytes; | ||||||
| 
 | 
 | ||||||
|  | @ -683,7 +688,7 @@ static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags, | ||||||
| 	return __kmalloc_node(bytes, flags, node); | 	return __kmalloc_node(bytes, flags, node); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node) | static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node) | ||||||
| { | { | ||||||
| 	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node); | 	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node); | ||||||
| } | } | ||||||
|  | @ -691,7 +696,7 @@ static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node) | ||||||
| 
 | 
 | ||||||
| #ifdef CONFIG_NUMA | #ifdef CONFIG_NUMA | ||||||
| extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node, | extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node, | ||||||
| 					 unsigned long caller); | 					 unsigned long caller) __alloc_size(1); | ||||||
| #define kmalloc_node_track_caller(size, flags, node) \ | #define kmalloc_node_track_caller(size, flags, node) \ | ||||||
| 	__kmalloc_node_track_caller(size, flags, node, \ | 	__kmalloc_node_track_caller(size, flags, node, \ | ||||||
| 			_RET_IP_) | 			_RET_IP_) | ||||||
|  | @ -716,7 +721,7 @@ static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags) | ||||||
|  * @size: how many bytes of memory are required. |  * @size: how many bytes of memory are required. | ||||||
|  * @flags: the type of memory to allocate (see kmalloc). |  * @flags: the type of memory to allocate (see kmalloc). | ||||||
|  */ |  */ | ||||||
| static inline void *kzalloc(size_t size, gfp_t flags) | static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags) | ||||||
| { | { | ||||||
| 	return kmalloc(size, flags | __GFP_ZERO); | 	return kmalloc(size, flags | __GFP_ZERO); | ||||||
| } | } | ||||||
|  | @ -727,7 +732,7 @@ static inline void *kzalloc(size_t size, gfp_t flags) | ||||||
|  * @flags: the type of memory to allocate (see kmalloc). |  * @flags: the type of memory to allocate (see kmalloc). | ||||||
|  * @node: memory node from which to allocate |  * @node: memory node from which to allocate | ||||||
|  */ |  */ | ||||||
| static inline void *kzalloc_node(size_t size, gfp_t flags, int node) | static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node) | ||||||
| { | { | ||||||
| 	return kmalloc_node(size, flags | __GFP_ZERO, node); | 	return kmalloc_node(size, flags | __GFP_ZERO, node); | ||||||
| } | } | ||||||
|  |  | ||||||
		Loading…
	
		Reference in a new issue
	
	 Kees Cook
						Kees Cook