	bpf: Create links for BPF struct_ops maps.
Make bpf_link support struct_ops. Previously, struct_ops were always used alone, without any associated link: updating the value of a struct_ops map activated it automatically, whereas every other BPF program type had to be attached through a bpf_link before it could become active. Now you can create an inactive struct_ops map and activate it later by creating a link for it.

With bpf_links, struct_ops behaves like the other BPF program types: you can pin and unpin a struct_ops through its link, and the struct_ops is deactivated when its link is removed. Previously, someone had to delete the map's value to deactivate a struct_ops.

bpf_links are responsible for registering their associated struct_ops. Only a struct_ops map created with the BPF_F_LINK flag can be used to create a bpf_link; a map without this flag behaves as before and is registered when its value is updated.

BPF_LINK_TYPE_STRUCT_OPS serves a dual purpose: it is used both for the links of BPF struct_ops programs and for the links of BPF struct_ops maps themselves. Since the links of BPF struct_ops programs are only used internally to create trampolines and are never seen in other contexts, the link type can be reused for struct_ops maps. To keep a reference to the map backing a link, bpf_struct_ops_link is added as an additional link type; its map pointer is RCU-protected, though that will not be needed until later in the patchset.

Signed-off-by: Kui-Feng Lee <kuifeng@meta.com>
Link: https://lore.kernel.org/r/20230323032405.3735486-4-kuifeng@meta.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
parent 8fb1a76a0f
commit 68b04864ca
					 6 changed files with 190 additions and 15 deletions
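As a quick orientation before the diff, here is a minimal user-space sketch of the new attach step, using only the uapi this patch adds. It assumes headers that contain this patch and a map_fd for a BPF_MAP_TYPE_STRUCT_OPS map created with BPF_F_LINK whose value has already been updated; error handling is elided.

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Minimal wrapper for the bpf(2) syscall; glibc provides no stub. */
static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr)
{
	return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

/* Activate a BPF_F_LINK struct_ops map by creating a link for it.
 * Returns a link fd on success, -1 with errno set on failure.
 */
static int struct_ops_link_create(int map_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_create.map_fd = map_fd;		/* new union member below */
	attr.link_create.attach_type = BPF_STRUCT_OPS;	/* new attach type below */

	return sys_bpf(BPF_LINK_CREATE, &attr);
}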
				
			
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
@@ -1518,6 +1518,7 @@ struct bpf_struct_ops {
 			   void *kdata, const void *udata);
 	int (*reg)(void *kdata);
 	void (*unreg)(void *kdata);
+	int (*validate)(void *kdata);
 	const struct btf_type *type;
 	const struct btf_type *value_type;
 	const char *name;
@@ -1552,6 +1553,7 @@ static inline void bpf_module_put(const void *data, struct module *owner)
 	else
 		module_put(owner);
 }
+int bpf_struct_ops_link_create(union bpf_attr *attr);
 
 #ifdef CONFIG_NET
 /* Define it here to avoid the use of forward declaration */
@@ -1592,6 +1594,11 @@ static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
 {
 	return -EINVAL;
 }
+static inline int bpf_struct_ops_link_create(union bpf_attr *attr)
+{
+	return -EOPNOTSUPP;
+}
+
 #endif
 
 #if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
@@ -1033,6 +1033,7 @@ enum bpf_attach_type {
 	BPF_PERF_EVENT,
 	BPF_TRACE_KPROBE_MULTI,
 	BPF_LSM_CGROUP,
+	BPF_STRUCT_OPS,
 	__MAX_BPF_ATTACH_TYPE
 };
 
@@ -1266,6 +1267,9 @@ enum {
 
 /* Create a map that is suitable to be an inner map with dynamic max entries */
 	BPF_F_INNER_MAP		= (1U << 12),
+
+/* Create a map that will be registered/unregesitered by the backed bpf_link */
+	BPF_F_LINK		= (1U << 13),
 };
 
 /* Flags for BPF_PROG_QUERY. */
@@ -1507,7 +1511,10 @@ union bpf_attr {
 	} task_fd_query;
 
 	struct { /* struct used by BPF_LINK_CREATE command */
-		__u32		prog_fd;	/* eBPF program to attach */
+		union {
+			__u32		prog_fd;	/* eBPF program to attach */
+			__u32		map_fd;		/* struct_ops to attach */
+		};
 		union {
 			__u32		target_fd;	/* object to attach to */
 			__u32		target_ifindex; /* target ifindex */
@@ -6379,6 +6386,9 @@ struct bpf_link_info {
 		struct {
 			__u32 ifindex;
 		} xdp;
+		struct {
+			__u32 map_id;
+		} struct_ops;
 	};
 } __attribute__((aligned(8)));
 
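The map itself can be created along these lines; a raw-syscall sketch reusing the sys_bpf() wrapper from the first example. The value size and btf_vmlinux_value_type_id must describe the matching bpf_struct_ops_<name> value type in vmlinux BTF (in practice resolved with libbpf or bpftool; here they are assumed to be passed in), and the remaining constraints mirror bpf_struct_ops_map_alloc_check() in the next file.

/* Sketch: create a struct_ops map whose registration is deferred to a
 * bpf_link. value_size and value_type_id are assumed to be resolved by
 * the caller from vmlinux BTF.
 */
static int struct_ops_map_create(__u32 value_size, __u32 value_type_id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_STRUCT_OPS;
	attr.key_size = sizeof(unsigned int);	/* alloc_check requires 4 */
	attr.value_size = value_size;
	attr.max_entries = 1;			/* alloc_check requires 1 */
	attr.map_flags = BPF_F_LINK;		/* defer reg/unreg to the link */
	attr.btf_vmlinux_value_type_id = value_type_id;

	return sys_bpf(BPF_MAP_CREATE, &attr);
}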
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
@@ -17,6 +17,7 @@ enum bpf_struct_ops_state {
 	BPF_STRUCT_OPS_STATE_INIT,
 	BPF_STRUCT_OPS_STATE_INUSE,
 	BPF_STRUCT_OPS_STATE_TOBEFREE,
+	BPF_STRUCT_OPS_STATE_READY,
 };
 
 #define BPF_STRUCT_OPS_COMMON_VALUE			\
@@ -59,6 +60,11 @@ struct bpf_struct_ops_map {
 	struct bpf_struct_ops_value kvalue;
 };
 
+struct bpf_struct_ops_link {
+	struct bpf_link link;
+	struct bpf_map __rcu *map;
+};
+
 #define VALUE_PREFIX "bpf_struct_ops_"
 #define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)
 
@@ -500,11 +506,29 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 		*(unsigned long *)(udata + moff) = prog->aux->id;
 	}
 
-	bpf_map_inc(map);
+	if (st_map->map.map_flags & BPF_F_LINK) {
+		err = st_ops->validate(kdata);
+		if (err)
+			goto reset_unlock;
+		set_memory_rox((long)st_map->image, 1);
+		/* Let bpf_link handle registration & unregistration.
+		 *
+		 * Pair with smp_load_acquire() during lookup_elem().
+		 */
+		smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_READY);
+		goto unlock;
+	}
+
 	set_memory_rox((long)st_map->image, 1);
 	err = st_ops->reg(kdata);
 	if (likely(!err)) {
+		/* This refcnt increment on the map here after
+		 * 'st_ops->reg()' is secure since the state of the
+		 * map must be set to INIT at this moment, and thus
+		 * bpf_struct_ops_map_delete_elem() can't unregister
+		 * or transition it to TOBEFREE concurrently.
+		 */
+		bpf_map_inc(map);
 		/* Pair with smp_load_acquire() during lookup_elem().
 		 * It ensures the above udata updates (e.g. prog->aux->id)
 		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
@@ -520,7 +544,6 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 	 */
 	set_memory_nx((long)st_map->image, 1);
 	set_memory_rw((long)st_map->image, 1);
-	bpf_map_put(map);
 
 reset_unlock:
 	bpf_struct_ops_map_put_progs(st_map);
@@ -538,6 +561,9 @@ static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
 	struct bpf_struct_ops_map *st_map;
 
 	st_map = (struct bpf_struct_ops_map *)map;
+	if (st_map->map.map_flags & BPF_F_LINK)
+		return -EOPNOTSUPP;
+
 	prev_state = cmpxchg(&st_map->kvalue.state,
 			     BPF_STRUCT_OPS_STATE_INUSE,
 			     BPF_STRUCT_OPS_STATE_TOBEFREE);
@@ -614,7 +640,7 @@ static void bpf_struct_ops_map_free(struct bpf_map *map)
 static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
 {
 	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
-	    attr->map_flags || !attr->btf_vmlinux_value_type_id)
+	    (attr->map_flags & ~BPF_F_LINK) || !attr->btf_vmlinux_value_type_id)
 		return -EINVAL;
 	return 0;
 }
@@ -638,6 +664,9 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
 	if (attr->value_size != vt->size)
 		return ERR_PTR(-EINVAL);
 
+	if (attr->map_flags & BPF_F_LINK && !st_ops->validate)
+		return ERR_PTR(-EOPNOTSUPP);
+
 	t = st_ops->type;
 
 	st_map_size = sizeof(*st_map) +
@@ -725,3 +754,111 @@ void bpf_struct_ops_put(const void *kdata)
 
 	bpf_map_put(&st_map->map);
 }
+
+static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
+{
+	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
+
+	return map->map_type == BPF_MAP_TYPE_STRUCT_OPS &&
+		map->map_flags & BPF_F_LINK &&
+		/* Pair with smp_store_release() during map_update */
+		smp_load_acquire(&st_map->kvalue.state) == BPF_STRUCT_OPS_STATE_READY;
+}
+
+static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
+{
+	struct bpf_struct_ops_link *st_link;
+	struct bpf_struct_ops_map *st_map;
+
+	st_link = container_of(link, struct bpf_struct_ops_link, link);
+	st_map = (struct bpf_struct_ops_map *)
+		rcu_dereference_protected(st_link->map, true);
+	if (st_map) {
+		/* st_link->map can be NULL if
+		 * bpf_struct_ops_link_create() fails to register.
+		 */
+		st_map->st_ops->unreg(&st_map->kvalue.data);
+		bpf_map_put(&st_map->map);
+	}
+	kfree(st_link);
+}
+
+static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
+					    struct seq_file *seq)
+{
+	struct bpf_struct_ops_link *st_link;
+	struct bpf_map *map;
+
+	st_link = container_of(link, struct bpf_struct_ops_link, link);
+	rcu_read_lock();
+	map = rcu_dereference(st_link->map);
+	seq_printf(seq, "map_id:\t%d\n", map->id);
+	rcu_read_unlock();
+}
+
+static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
+					       struct bpf_link_info *info)
+{
+	struct bpf_struct_ops_link *st_link;
+	struct bpf_map *map;
+
+	st_link = container_of(link, struct bpf_struct_ops_link, link);
+	rcu_read_lock();
+	map = rcu_dereference(st_link->map);
+	info->struct_ops.map_id = map->id;
+	rcu_read_unlock();
+	return 0;
+}
+
+static const struct bpf_link_ops bpf_struct_ops_map_lops = {
+	.dealloc = bpf_struct_ops_map_link_dealloc,
+	.show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
+	.fill_link_info = bpf_struct_ops_map_link_fill_link_info,
+};
+
+int bpf_struct_ops_link_create(union bpf_attr *attr)
+{
+	struct bpf_struct_ops_link *link = NULL;
+	struct bpf_link_primer link_primer;
+	struct bpf_struct_ops_map *st_map;
+	struct bpf_map *map;
+	int err;
+
+	map = bpf_map_get(attr->link_create.map_fd);
+	if (!map)
+		return -EINVAL;
+
+	st_map = (struct bpf_struct_ops_map *)map;
+
+	if (!bpf_struct_ops_valid_to_reg(map)) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	link = kzalloc(sizeof(*link), GFP_USER);
+	if (!link) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_map_lops, NULL);
+
+	err = bpf_link_prime(&link->link, &link_primer);
+	if (err)
+		goto err_out;
+
+	err = st_map->st_ops->reg(st_map->kvalue.data);
+	if (err) {
+		bpf_link_cleanup(&link_primer);
+		link = NULL;
+		goto err_out;
+	}
+	RCU_INIT_POINTER(link->map, map);
+
+	return bpf_link_settle(&link_primer);
+
+err_out:
+	bpf_map_put(map);
+	kfree(link);
+	return err;
+}
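The net effect in bpf_struct_ops.c: updating the value of a BPF_F_LINK map only validates it and marks it BPF_STRUCT_OPS_STATE_READY; st_ops->reg() runs when the link is created and st_ops->unreg() when the link is released, so the struct_ops stays active exactly as long as the link (or a pin of it) is alive. At the libbpf level, a sketch of that lifecycle might look as follows; it assumes the libbpf struct_ops-link support added later in this series, and the pin path is hypothetical.

#include <errno.h>
#include <bpf/libbpf.h>

/* Sketch: attach a BPF_F_LINK struct_ops map and keep it registered
 * past process exit by pinning the link. "ops" is a struct_ops map
 * from a loaded bpf object.
 */
static int activate_and_pin(struct bpf_map *ops)
{
	struct bpf_link *link;
	int err;

	link = bpf_map__attach_struct_ops(ops);	/* BPF_LINK_CREATE under the hood */
	if (!link)
		return -errno;

	/* Registration now lives and dies with the link; the pinned file
	 * keeps the kernel link (and thus the struct_ops) alive after the
	 * link fd is closed below.
	 */
	err = bpf_link__pin(link, "/sys/fs/bpf/sample_ops_link");	/* hypothetical path */
	if (err) {
		bpf_link__destroy(link);
		return err;
	}

	bpf_link__destroy(link);
	return 0;
}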
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
@@ -2825,16 +2825,19 @@ static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
 	const struct bpf_prog *prog = link->prog;
 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
 
-	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
 	seq_printf(m,
 		   "link_type:\t%s\n"
-		   "link_id:\t%u\n"
-		   "prog_tag:\t%s\n"
-		   "prog_id:\t%u\n",
+		   "link_id:\t%u\n",
 		   bpf_link_type_strs[link->type],
-		   link->id,
-		   prog_tag,
-		   prog->aux->id);
+		   link->id);
+	if (prog) {
+		bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
+		seq_printf(m,
+			   "prog_tag:\t%s\n"
+			   "prog_id:\t%u\n",
+			   prog_tag,
+			   prog->aux->id);
+	}
 	if (link->ops->show_fdinfo)
 		link->ops->show_fdinfo(link, m);
 }
@@ -4314,7 +4317,8 @@ static int bpf_link_get_info_by_fd(struct file *file,
 
 	info.type = link->type;
 	info.id = link->id;
-	info.prog_id = link->prog->aux->id;
+	if (link->prog)
+		info.prog_id = link->prog->aux->id;
 
 	if (link->ops->fill_link_info) {
 		err = link->ops->fill_link_info(link, &info);
@@ -4577,6 +4581,9 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr)
 	if (CHECK_ATTR(BPF_LINK_CREATE))
 		return -EINVAL;
 
+	if (attr->link_create.attach_type == BPF_STRUCT_OPS)
+		return bpf_struct_ops_link_create(attr);
+
 	prog = bpf_prog_get(attr->link_create.prog_fd);
 	if (IS_ERR(prog))
 		return PTR_ERR(prog);
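With the syscall plumbing above tolerating links that have no program (link->prog is NULL for a struct_ops map link), tools can discover the backing map instead. A sketch of that query with libbpf, assuming a valid link fd:

#include <errno.h>
#include <string.h>
#include <bpf/bpf.h>
#include <linux/bpf.h>

/* Sketch: recover the id of the struct_ops map backing a link fd via
 * the struct_ops member added to struct bpf_link_info.
 */
static int struct_ops_link_map_id(int link_fd, __u32 *map_id)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	int err;

	memset(&info, 0, sizeof(info));
	err = bpf_obj_get_info_by_fd(link_fd, &info, &len);
	if (err)
		return err;
	if (info.type != BPF_LINK_TYPE_STRUCT_OPS)
		return -EINVAL;	/* some other link type */

	*map_id = info.struct_ops.map_id;
	return 0;
}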
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
@@ -239,8 +239,6 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t,
 		if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
 				     sizeof(tcp_ca->name)) <= 0)
 			return -EINVAL;
-		if (tcp_ca_find(utcp_ca->name))
-			return -EEXIST;
 		return 1;
 	}
 
@@ -266,6 +264,11 @@ static void bpf_tcp_ca_unreg(void *kdata)
 	tcp_unregister_congestion_control(kdata);
 }
 
+static int bpf_tcp_ca_validate(void *kdata)
+{
+	return tcp_validate_congestion_control(kdata);
+}
+
 struct bpf_struct_ops bpf_tcp_congestion_ops = {
 	.verifier_ops = &bpf_tcp_ca_verifier_ops,
 	.reg = bpf_tcp_ca_reg,
@@ -273,6 +276,7 @@ struct bpf_struct_ops bpf_tcp_congestion_ops = {
 	.check_member = bpf_tcp_ca_check_member,
 	.init_member = bpf_tcp_ca_init_member,
 	.init = bpf_tcp_ca_init,
+	.validate = bpf_tcp_ca_validate,
 	.name = "tcp_congestion_ops",
 };
 
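On the BPF side, a congestion control that attaches through a link would be declared roughly as below. The SEC(".struct_ops.link") convention is an assumption here (it comes from libbpf patches later in this series) and the ops are placeholders; a real CC must implement everything tcp_validate_congestion_control() checks for. Note that the duplicate-name check dropped from bpf_tcp_ca_init_member() above still happens at registration time, where tcp_register_congestion_control() rejects an existing name with -EEXIST.

/* BPF object side, compiled with clang -target bpf. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("struct_ops/sample_undo_cwnd")
__u32 BPF_PROG(sample_undo_cwnd, struct sock *sk)
{
	/* Placeholder: report the current cwnd as the undo target. */
	return ((struct tcp_sock *)sk)->snd_cwnd;
}

/* ".struct_ops.link" (vs plain ".struct_ops") asks libbpf to create the
 * map with BPF_F_LINK and attach it via BPF_LINK_CREATE; assumed from
 * later patches in this series.
 */
SEC(".struct_ops.link")
struct tcp_congestion_ops sample_ca = {
	.undo_cwnd = (void *)sample_undo_cwnd,
	/* .ssthresh and .cong_avoid or .cong_control elided for brevity;
	 * tcp_validate_congestion_control() requires them in a real CC.
	 */
	.name = "bpf_sample_ca",
};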
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
@@ -1033,6 +1033,7 @@ enum bpf_attach_type {
 	BPF_PERF_EVENT,
 	BPF_TRACE_KPROBE_MULTI,
 	BPF_LSM_CGROUP,
+	BPF_STRUCT_OPS,
 	__MAX_BPF_ATTACH_TYPE
 };
 
@@ -1266,6 +1267,9 @@ enum {
 
 /* Create a map that is suitable to be an inner map with dynamic max entries */
 	BPF_F_INNER_MAP		= (1U << 12),
+
+/* Create a map that will be registered/unregesitered by the backed bpf_link */
+	BPF_F_LINK		= (1U << 13),
 };
 
 /* Flags for BPF_PROG_QUERY. */
@@ -1507,7 +1511,10 @@ union bpf_attr {
 	} task_fd_query;
 
 	struct { /* struct used by BPF_LINK_CREATE command */
-		__u32		prog_fd;	/* eBPF program to attach */
+		union {
+			__u32		prog_fd;	/* eBPF program to attach */
+			__u32		map_fd;		/* eBPF struct_ops to attach */
+		};
 		union {
 			__u32		target_fd;	/* object to attach to */
 			__u32		target_ifindex; /* target ifindex */
@@ -6379,6 +6386,9 @@ struct bpf_link_info {
 		struct {
 			__u32 ifindex;
 		} xdp;
+		struct {
+			__u32 map_id;
+		} struct_ops;
 	};
 } __attribute__((aligned(8)));
 