forked from mirrors/linux
		
commit 5af6807bdb

Introduce bpf_mem_[cache_]free_rcu(), similar to kfree_rcu(). Unlike
bpf_mem_[cache_]free(), which links objects into a per-cpu free list for
immediate reuse, the _rcu() flavor waits for an RCU grace period and then
moves objects into the free_by_rcu_ttrace list, where they wait for an RCU
tasks trace grace period before being freed into slab.

The life cycle of objects:

alloc:    dequeue free_llist
free:     enqueue free_llist
free_rcu: enqueue free_by_rcu -> waiting_for_gp
free_llist above high watermark -> free_by_rcu_ttrace
after RCU GP, waiting_for_gp -> free_by_rcu_ttrace
free_by_rcu_ttrace -> waiting_for_gp_ttrace -> slab

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/bpf/20230706033447.54696-13-alexei.starovoitov@gmail.com
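
The sketch below is a hypothetical kernel-internal caller (the struct and
function names are illustrative, not from this commit) showing where the
deferred flavor matters: an object that RCU or RCU tasks trace readers may
still dereference is returned with bpf_mem_cache_free_rcu() instead of
bpf_mem_cache_free(), so it is not recycled until both grace periods have
elapsed.

/* Hypothetical caller; 'struct my_node', 'node_ma' and 'release_node' are
 * illustrative names, not part of the kernel tree.
 */
#include <linux/bpf_mem_alloc.h>

struct my_node {
	int key;
	int value;
};

/* Assumed to be initialized elsewhere with
 * bpf_mem_alloc_init(&node_ma, sizeof(struct my_node), false).
 */
static struct bpf_mem_alloc node_ma;

static void release_node(struct my_node *n)
{
	/*
	 * bpf_mem_cache_free(&node_ma, n) would put 'n' back on the per-cpu
	 * free_llist for immediate reuse -- only safe if no RCU-protected
	 * reader can still see it.
	 *
	 * The _rcu() flavor instead takes the commit's deferred path:
	 * free_by_rcu -> waiting_for_gp -> free_by_rcu_ttrace ->
	 * waiting_for_gp_ttrace -> slab, keeping concurrent RCU and
	 * RCU tasks trace readers safe.
	 */
	bpf_mem_cache_free_rcu(&node_ma, n);
}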
		
			
				
	
	
		
39 lines | 1.4 KiB | C

/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#ifndef _BPF_MEM_ALLOC_H
#define _BPF_MEM_ALLOC_H
#include <linux/compiler_types.h>
#include <linux/workqueue.h>

struct bpf_mem_cache;
struct bpf_mem_caches;

struct bpf_mem_alloc {
	struct bpf_mem_caches __percpu *caches;
	struct bpf_mem_cache __percpu *cache;
	struct work_struct work;
};

/* 'size != 0' is for bpf_mem_alloc which manages fixed-size objects.
 * Alloc and free are done with bpf_mem_cache_{alloc,free}().
 *
 * 'size = 0' is for bpf_mem_alloc which manages many fixed-size objects.
 * Alloc and free are done with bpf_mem_{alloc,free}() and the size of
 * the returned object is given by the size argument of bpf_mem_alloc().
 */
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu);
void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);

/* kmalloc/kfree equivalent: */
void *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size);
void bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr);
void bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr);

/* kmem_cache_alloc/free equivalent: */
void *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma);
void bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr);
void bpf_mem_cache_free_rcu(struct bpf_mem_alloc *ma, void *ptr);
void bpf_mem_cache_raw_free(void *ptr);
void *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags);

#endif /* _BPF_MEM_ALLOC_H */
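
To make the header comment above concrete, here is a minimal, hedged sketch
of the two modes; the wrapper function and the element type are assumptions
for illustration, not code from the tree.

/* Illustrative only: contrasts the fixed-size ('size != 0') mode with the
 * kmalloc/kfree-like ('size = 0') mode declared above.
 */
#include <linux/bpf_mem_alloc.h>

struct elem {
	long payload;
};

static int bpf_mem_alloc_modes_example(void)
{
	struct bpf_mem_alloc fixed_ma, any_ma;
	struct elem *e;
	void *buf;
	int err;

	/* Fixed-size mode: every object is sizeof(struct elem). */
	err = bpf_mem_alloc_init(&fixed_ma, sizeof(struct elem), false);
	if (err)
		return err;

	e = bpf_mem_cache_alloc(&fixed_ma);
	if (e)
		bpf_mem_cache_free(&fixed_ma, e);

	/* kmalloc/kfree-like mode: size is chosen per allocation. */
	err = bpf_mem_alloc_init(&any_ma, 0, false);
	if (err) {
		bpf_mem_alloc_destroy(&fixed_ma);
		return err;
	}

	buf = bpf_mem_alloc(&any_ma, 64);
	if (buf)
		bpf_mem_free(&any_ma, buf);

	bpf_mem_alloc_destroy(&any_ma);
	bpf_mem_alloc_destroy(&fixed_ma);
	return 0;
}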