	bpf: Switch bpf_map_{area_alloc,area_mmapable_alloc}() to u64 size
Given we recently extended the original bpf_map_area_alloc() helper in
commit fc9702273e ("bpf: Add mmap() support for BPF_MAP_TYPE_ARRAY"), we
need to apply the same logic as in ff1c08e1f7 ("bpf: Change size to u64
for bpf_map_{area_alloc, charge_init}()"). To avoid conflicts, extend it
for bpf-next.

Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
This commit is contained in:

parent 91e6015b08
commit 196e8ca748

2 changed files with 10 additions and 7 deletions
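The motivation for the type change can be illustrated outside the kernel. The sketch below is a plain userspace C program, not kernel code: it shows how a size computed in 64 bits (e.g. a large entries * element-size product) gets silently truncated when squeezed through a 32-bit size_t, which is exactly what taking the parameter as u64 and adding the size >= SIZE_MAX guard prevents.

/*
 * Minimal userspace sketch (not kernel code) of the truncation problem
 * addressed by switching the allocator helpers to a u64 size parameter.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A size computed in 64 bits, e.g. max_entries * value_size. */
	uint64_t size = (uint64_t)UINT32_MAX + 2;	/* 4 GiB + 1 byte */

	/* What a 32-bit size_t parameter would actually receive: */
	uint32_t truncated = (uint32_t)size;

	printf("intended size : %llu bytes\n", (unsigned long long)size);
	printf("truncated size: %u bytes\n", truncated);	/* 1 byte(!) */

	/* The kernel-side approach: keep the full-width value and refuse
	 * anything that does not fit in size_t, instead of letting an
	 * implicit cast shrink it. On a 32-bit build this check fires. */
	if (size >= SIZE_MAX)
		printf("rejected by the size >= SIZE_MAX check\n");
	return 0;
}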
include/linux/bpf.h
@@ -794,12 +794,12 @@ void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
-int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size);
+int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
 void bpf_map_charge_finish(struct bpf_map_memory *mem);
 void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src);
-void *bpf_map_area_alloc(size_t size, int numa_node);
-void *bpf_map_area_mmapable_alloc(size_t size, int numa_node);
+void *bpf_map_area_alloc(u64 size, int numa_node);
+void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
 void bpf_map_area_free(void *base);
 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
 

kernel/bpf/syscall.c
@@ -128,7 +128,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 	return map;
 }
 
-static void *__bpf_map_area_alloc(size_t size, int numa_node, bool mmapable)
+static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
 {
 	/* We really just want to fail instead of triggering OOM killer
 	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
@@ -143,6 +143,9 @@ static void *__bpf_map_area_alloc(size_t size, int numa_node, bool mmapable)
 	const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
 	void *area;
 
+	if (size >= SIZE_MAX)
+		return NULL;
+
 	/* kmalloc()'ed memory can't be mmap()'ed */
 	if (!mmapable && size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
 		area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
@@ -160,12 +163,12 @@ static void *__bpf_map_area_alloc(size_t size, int numa_node, bool mmapable)
 			   flags, __builtin_return_address(0));
 }
 
-void *bpf_map_area_alloc(size_t size, int numa_node)
+void *bpf_map_area_alloc(u64 size, int numa_node)
 {
 	return __bpf_map_area_alloc(size, numa_node, false);
 }
 
-void *bpf_map_area_mmapable_alloc(size_t size, int numa_node)
+void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
 {
 	return __bpf_map_area_alloc(size, numa_node, true);
 }
@@ -214,7 +217,7 @@ static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
 		atomic_long_sub(pages, &user->locked_vm);
 }
 
-int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size)
+int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
 {
 	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
 	struct user_struct *user;