bpf: use bpf_map_kvcalloc in bpf_local_storage

Introduce a new helper, bpf_map_kvcalloc(), for the memory allocation in
bpf_local_storage. The allocation is then charged to the map's memcg instead
of to current's, though for now they are the same thing, as the helper is only
used in the map creation path. Charging the map's memory to the memcg taken
from the map makes the accounting clearer.

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Link: https://lore.kernel.org/r/20230210154734.4416-3-laoar.shao@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
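As a minimal sketch of what this means for a caller (the map type, struct, and field names below are hypothetical, not part of the patch), a map creation path that previously called kvcalloc() with an explicit __GFP_ACCOUNT, charging the memory to the creating task's memcg, would now call bpf_map_kvcalloc() and drop that flag, since the helper adds __GFP_ACCOUNT itself and selects the map's memcg:

/*
 * Illustrative only: a hypothetical map type whose creation path
 * allocates a bucket array. Before this patch it would have used
 * kvcalloc(..., GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT); with
 * bpf_map_kvcalloc() the charge goes to the memcg recorded in the map.
 */
#include <linux/bpf.h>
#include <linux/slab.h>

struct example_map {
	struct bpf_map map;		/* common map header, embedded first */
	struct hlist_head *buckets;	/* hypothetical per-map hash buckets */
	u32 nbuckets;
};

static int example_map_alloc_buckets(struct example_map *emap, u32 nbuckets)
{
	/* __GFP_ACCOUNT is added by the helper, so it is not passed here. */
	emap->buckets = bpf_map_kvcalloc(&emap->map, nbuckets,
					 sizeof(*emap->buckets),
					 GFP_USER | __GFP_NOWARN);
	if (!emap->buckets)
		return -ENOMEM;

	emap->nbuckets = nbuckets;
	return 0;
}

The actual conversion in bpf_local_storage and the helper itself are shown in the diff below.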
parent b6c1a8af5b
commit ddef81b5fd

3 changed files with 25 additions and 2 deletions
include/linux/bpf.h

@@ -1886,6 +1886,8 @@ struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
 			   int node);
 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
+void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
+		       gfp_t flags);
 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
 				    size_t align, gfp_t flags);
 #else
@@ -1902,6 +1904,12 @@ bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
 	return kzalloc(size, flags);
 }
 
+static inline void *
+bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags)
+{
+	return kvcalloc(n, size, flags);
+}
+
 static inline void __percpu *
 bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
 		     gfp_t flags)
kernel/bpf/bpf_local_storage.c

@@ -568,8 +568,8 @@ static struct bpf_local_storage_map *__bpf_local_storage_map_alloc(union bpf_att
 	nbuckets = max_t(u32, 2, nbuckets);
 	smap->bucket_log = ilog2(nbuckets);
 
-	smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
-				 GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
+	smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
+					 nbuckets, GFP_USER | __GFP_NOWARN);
 	if (!smap->buckets) {
 		bpf_map_area_free(smap);
 		return ERR_PTR(-ENOMEM);
kernel/bpf/syscall.c

@@ -464,6 +464,21 @@ void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
 	return ptr;
 }
 
+void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
+		       gfp_t flags)
+{
+	struct mem_cgroup *memcg, *old_memcg;
+	void *ptr;
+
+	memcg = bpf_map_get_memcg(map);
+	old_memcg = set_active_memcg(memcg);
+	ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
+	set_active_memcg(old_memcg);
+	mem_cgroup_put(memcg);
+
+	return ptr;
+}
+
 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
 				    size_t align, gfp_t flags)
 {