mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 02:30:34 +02:00 
			
		
		
		
	bpf: Introduce bpf_map ID
This patch generates a unique ID for each created bpf_map. The approach is similar to the earlier patch for bpf_prog ID. It is worth noting that the bpf_map's ID and bpf_prog's ID are in two independent ID spaces and both have the same valid range: [1, INT_MAX). Signed-off-by: Martin KaFai Lau <kafai@fb.com> Acked-by: Alexei Starovoitov <ast@fb.com> Acked-by: Daniel Borkmann <daniel@iogearbox.net> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
		
							parent
							
								
									dc4bb0e235
								
							
						
					
					
						commit
						f3f1c054c2
					
				
					 2 changed files with 34 additions and 1 deletion
				
			
		| 
						 | 
				
			
			@ -46,6 +46,7 @@ struct bpf_map {
 | 
			
		|||
	u32 max_entries;
 | 
			
		||||
	u32 map_flags;
 | 
			
		||||
	u32 pages;
 | 
			
		||||
	u32 id;
 | 
			
		||||
	struct user_struct *user;
 | 
			
		||||
	const struct bpf_map_ops *ops;
 | 
			
		||||
	struct work_struct work;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -27,6 +27,8 @@
 | 
			
		|||
DEFINE_PER_CPU(int, bpf_prog_active);
 | 
			
		||||
static DEFINE_IDR(prog_idr);
 | 
			
		||||
static DEFINE_SPINLOCK(prog_idr_lock);
 | 
			
		||||
static DEFINE_IDR(map_idr);
 | 
			
		||||
static DEFINE_SPINLOCK(map_idr_lock);
 | 
			
		||||
 | 
			
		||||
int sysctl_unprivileged_bpf_disabled __read_mostly;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -117,6 +119,29 @@ static void bpf_map_uncharge_memlock(struct bpf_map *map)
 | 
			
		|||
	free_uid(user);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/*
 * Allocate a unique ID for @map from the map IDR.
 *
 * IDs are allocated cyclically in the range [1, INT_MAX) so recently
 * freed IDs are not reused immediately.  The map ID space is
 * independent of the bpf_prog ID space (see the commit message).
 *
 * Returns 0 on success and stores the ID in map->id; returns a
 * negative errno (e.g. -ENOSPC) if the IDR allocation failed.
 */
static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	/* GFP_ATOMIC: we hold a BH-disabling spinlock, so we must not sleep */
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);

	/*
	 * idr_alloc_cyclic() with a minimum of 1 should never return 0;
	 * warn once and treat it as exhaustion if it ever does.
	 */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	/* id > 0 is success; a negative id is the error to propagate */
	return id > 0 ? 0 : id;
}
 | 
			
		||||
 | 
			
		||||
/*
 * Release @map's ID back to the map IDR so it can be reused.
 * Takes the same map_idr_lock as bpf_map_alloc_id() to serialize
 * against concurrent ID allocation.
 */
static void bpf_map_free_id(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	idr_remove(&map_idr, map->id);
	spin_unlock_bh(&map_idr_lock);
}
 | 
			
		||||
 | 
			
		||||
/* called from workqueue */
 | 
			
		||||
static void bpf_map_free_deferred(struct work_struct *work)
 | 
			
		||||
{
 | 
			
		||||
| 
						 | 
				
			
			@ -141,6 +166,7 @@ static void bpf_map_put_uref(struct bpf_map *map)
 | 
			
		|||
void bpf_map_put(struct bpf_map *map)
 | 
			
		||||
{
 | 
			
		||||
	if (atomic_dec_and_test(&map->refcnt)) {
 | 
			
		||||
		bpf_map_free_id(map);
 | 
			
		||||
		INIT_WORK(&map->work, bpf_map_free_deferred);
 | 
			
		||||
		schedule_work(&map->work);
 | 
			
		||||
	}
 | 
			
		||||
| 
						 | 
				
			
			@ -239,14 +265,20 @@ static int map_create(union bpf_attr *attr)
 | 
			
		|||
	if (err)
 | 
			
		||||
		goto free_map_nouncharge;
 | 
			
		||||
 | 
			
		||||
	err = bpf_map_alloc_id(map);
 | 
			
		||||
	if (err)
 | 
			
		||||
		goto free_map;
 | 
			
		||||
 | 
			
		||||
	err = bpf_map_new_fd(map);
 | 
			
		||||
	if (err < 0)
 | 
			
		||||
		/* failed to allocate fd */
 | 
			
		||||
		goto free_map;
 | 
			
		||||
		goto free_id;
 | 
			
		||||
 | 
			
		||||
	trace_bpf_map_create(map, err);
 | 
			
		||||
	return err;
 | 
			
		||||
 | 
			
		||||
free_id:
 | 
			
		||||
	bpf_map_free_id(map);
 | 
			
		||||
free_map:
 | 
			
		||||
	bpf_map_uncharge_memlock(map);
 | 
			
		||||
free_map_nouncharge:
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue