mirror of https://github.com/torvalds/linux.git (synced 2025-10-31 16:48:26 +02:00)
			
		
		
		
commit 02a8c817a3
commit f1a2e44a3a ("bpf: add queue and stack maps") introduced new BPF
helper functions:
- BPF_FUNC_map_push_elem
- BPF_FUNC_map_pop_elem
- BPF_FUNC_map_peek_elem
but they were made available only for network BPF programs. This patch
makes them available for tracepoint, cgroup and lirc programs.

Signed-off-by: Alban Crequy <alban@kinvolk.io>
Cc: Mauricio Vasquez B <mauricio.vasquez@polito.it>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
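
For illustration (an editor's sketch, not part of the commit), this is the
kind of program the change enables: a device-cgroup program can now feed a
BPF_MAP_TYPE_QUEUE that user space drains with
BPF_MAP_LOOKUP_AND_DELETE_ELEM. All names below are hypothetical, and the
map definition assumes a libbpf-style BTF layout.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* hypothetical queue of observed device minor numbers */
struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, 1024);
	__uint(value_size, sizeof(__u32));
} seen_minors SEC(".maps");

SEC("cgroup/dev")
int record_dev_access(struct bpf_cgroup_dev_ctx *ctx)
{
	__u32 minor = ctx->minor;

	/* one of the helpers this patch exposes to cgroup programs */
	bpf_map_push_elem(&seen_minors, &minor, BPF_ANY);
	return 1;	/* allow the access; we only record it */
}

char _license[] SEC("license") = "GPL";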
		
	
			
		
			
				
	
	
		
1132 lines | 29 KiB | C

/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 *
 * This file is subject to the terms and conditions of version 2 of the GNU
 * General Public License.  See the file COPYING in the main directory of the
 * Linux distribution for more details.
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>

DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

/**
 * cgroup_bpf_put() - put references of all bpf programs
 * @cgrp: the cgroup to modify
 */
void cgroup_bpf_put(struct cgroup *cgrp)
{
	enum bpf_cgroup_storage_type stype;
	unsigned int type;

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *tmp;

		list_for_each_entry_safe(pl, tmp, progs, node) {
			list_del(&pl->node);
			bpf_prog_put(pl->prog);
			for_each_cgroup_storage_type(stype) {
				bpf_cgroup_storage_unlink(pl->storage[stype]);
				bpf_cgroup_storage_free(pl->storage[stype]);
			}
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		bpf_prog_array_free(cgrp->bpf.effective[type]);
	}
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!pl->prog)
			continue;
		cnt++;
	}
	return cnt;
}

/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type,
				    u32 new_flags)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}
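
/* Illustration of the rules above (editor's sketch): given cgroups /A and
 * /A/B,
 *  - a prog attached to /A with no flags makes any attach to /A/B fail
 *    with -EPERM;
 *  - a prog attached to /A with BPF_F_ALLOW_OVERRIDE can be shadowed by a
 *    prog attached to /A/B with BPF_F_ALLOW_OVERRIDE;
 *  - a prog attached to /A with BPF_F_ALLOW_MULTI runs in addition to
 *    whatever is attached to /A/B.
 */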

/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program is yielding
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array __rcu **array)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[type], node) {
			if (!pl->prog)
				continue;

			progs->items[cnt].prog = pl->prog;
			for_each_cgroup_storage_type(stype)
				progs->items[cnt].cgroup_storage[stype] =
					pl->storage[stype];
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	rcu_assign_pointer(*array, progs);
	return 0;
}

static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array __rcu *array)
{
	struct bpf_prog_array __rcu *old_array;

	old_array = xchg(&cgrp->bpf.effective[type], array);
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since compiler thinks
 * that array below is variable length
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array __rcu *arrays[NR] = {};
	int i;

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);
	return -ENOMEM;
}

static int update_effective_progs(struct cgroup *cgrp,
				  enum bpf_attach_type type)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

/**
 * __cgroup_bpf_attach() - Attach the program to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
		*old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
	enum bpf_cgroup_storage_type stype;
	struct bpf_prog_list *pl;
	bool pl_was_allocated;
	int err;

	if ((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI))
		/* invalid combination */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type, flags))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (flags & BPF_F_ALLOW_MULTI) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == prog) {
				/* disallow attaching the same prog twice */
				for_each_cgroup_storage_type(stype)
					bpf_cgroup_storage_free(storage[stype]);
				return -EINVAL;
			}
		}

		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}

		pl_was_allocated = true;
		pl->prog = prog;
		for_each_cgroup_storage_type(stype)
			pl->storage[stype] = storage[stype];
		list_add_tail(&pl->node, progs);
	} else {
		if (list_empty(progs)) {
			pl = kmalloc(sizeof(*pl), GFP_KERNEL);
			if (!pl) {
				for_each_cgroup_storage_type(stype)
					bpf_cgroup_storage_free(storage[stype]);
				return -ENOMEM;
			}
			pl_was_allocated = true;
			list_add_tail(&pl->node, progs);
		} else {
			pl = list_first_entry(progs, typeof(*pl), node);
			old_prog = pl->prog;
			for_each_cgroup_storage_type(stype) {
				old_storage[stype] = pl->storage[stype];
				bpf_cgroup_storage_unlink(old_storage[stype]);
			}
			pl_was_allocated = false;
		}
		pl->prog = prog;
		for_each_cgroup_storage_type(stype)
			pl->storage[stype] = storage[stype];
	}

	cgrp->bpf.flags[type] = flags;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	static_branch_inc(&cgroup_bpf_enabled_key);
	for_each_cgroup_storage_type(stype) {
		if (!old_storage[stype])
			continue;
		bpf_cgroup_storage_free(old_storage[stype]);
	}
	if (old_prog) {
		bpf_prog_put(old_prog);
		static_branch_dec(&cgroup_bpf_enabled_key);
	}
	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storage[stype], cgrp, type);
	return 0;

cleanup:
	/* and cleanup the prog list */
	pl->prog = old_prog;
	for_each_cgroup_storage_type(stype) {
		bpf_cgroup_storage_free(pl->storage[stype]);
		pl->storage[stype] = old_storage[stype];
		bpf_cgroup_storage_link(old_storage[stype], cgrp, type);
	}
	if (pl_was_allocated) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}
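
/* Userspace usage sketch (editor's addition): with libbpf these flags are
 * passed through bpf_prog_attach(), e.g.
 *
 *	err = bpf_prog_attach(prog_fd, cgroup_fd,
 *			      BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI);
 *
 * which reaches this function via the BPF_PROG_ATTACH syscall command.
 */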

/**
 * __cgroup_bpf_detach() - Detach the program from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @type: Type of detach operation
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	enum bpf_cgroup_storage_type stype;
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_prog_list *pl;
	int err;

	if (flags & BPF_F_ALLOW_MULTI) {
		if (!prog)
			/* to detach MULTI prog the user has to specify valid FD
			 * of the program to be detached
			 */
			return -EINVAL;
	} else {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return -ENOENT;
	}

	if (flags & BPF_F_ALLOW_MULTI) {
		/* find the prog and detach it */
		list_for_each_entry(pl, progs, node) {
			if (pl->prog != prog)
				continue;
			old_prog = prog;
			/* mark it deleted, so it's ignored while
			 * recomputing effective
			 */
			pl->prog = NULL;
			break;
		}
		if (!old_prog)
			return -ENOENT;
	} else {
		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL)
		 */
		pl = list_first_entry(progs, typeof(*pl), node);
		old_prog = pl->prog;
		pl->prog = NULL;
	}

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	for_each_cgroup_storage_type(stype) {
		bpf_cgroup_storage_unlink(pl->storage[stype]);
		bpf_cgroup_storage_free(pl->storage[stype]);
	}
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;

	bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;

cleanup:
	/* and restore back old_prog */
	pl->prog = old_prog;
	return err;
}

/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	int cnt, ret = 0, i;

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(cgrp->bpf.effective[type]);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(cgrp->bpf.effective[type],
						   prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			id = pl->prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
				attr->attach_flags);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
				 __bpf_prog_run_save_cb);
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
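
/* Editor's sketch of the return-value convention documented above: a
 * minimal BPF_PROG_TYPE_CGROUP_SKB program that admits all traffic is
 *
 *	SEC("cgroup_skb/egress")
 *	int allow_all(struct __sk_buff *skb)
 *	{
 *		return 1;	// 1 = allow; 0 turns into -EPERM here
 *	}
 */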

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       sockaddr provided by user
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}
EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission);
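
/* Editor's note: access_type above packs both values into one field; a
 * BPF_PROG_TYPE_CGROUP_DEVICE program unpacks it along these lines:
 *
 *	short dev_type = ctx->access_type & 0xFFFF; // BPF_DEVCG_DEV_BLOCK/CHAR
 *	short access = ctx->access_type >> 16;      // BPF_DEVCG_ACC_READ/WRITE/MKNOD
 */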

static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
		/* fall through */
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer passed by user space
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of @new_buf if program set new value, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @new_buf: pointer to pointer to new buffer that will be allocated if program
 *	overrides new value provided by user space on sysctl write
 *	NOTE: it's the caller's responsibility to free *new_buf if it was set
 * @type: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void __user *buf, size_t *pcount,
				   loff_t *ppos, void **new_buf,
				   enum bpf_attach_type type)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (ctx.cur_val) {
		mm_segment_t old_fs;
		loff_t pos = 0;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		if (table->proc_handler(table, 0, (void __user *)ctx.cur_val,
					&ctx.cur_len, &pos)) {
			/* Let BPF program decide how to proceed. */
			ctx.cur_len = 0;
		}
		set_fs(old_fs);
	} else {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (!ctx.new_val ||
		    copy_from_user(ctx.new_val, buf, ctx.new_len))
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		*new_buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sysctl);

static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;
	ret += tmp_ret;

	/* Avoid leading slash. */
	if (!ret)
		return ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}

BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}

static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func		= bpf_sysctl_get_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return src_len;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}
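
/* Editor's example of the contract above: copying "1\n" (src_len == 2) into
 * an 8-byte dst stores "1\n" followed by six NULs and returns 2; copying it
 * into a 1-byte dst stores "\0" and returns -E2BIG, letting callers tell a
 * complete copy from a truncated one.
 */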

BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func		= bpf_sysctl_get_current_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}
	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func		= bpf_sysctl_get_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
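
/* Editor's sketch of how the sysctl helpers combine in a
 * BPF_PROG_TYPE_CGROUP_SYSCTL program (illustration only):
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_guard(struct bpf_sysctl *ctx)
 *	{
 *		char name[64];
 *
 *		if (bpf_sysctl_get_name(ctx, name, sizeof(name), 0) < 0)
 *			return 0;	// deny when the name can't be read
 *		return 1;		// 1 = allow the access
 *	}
 */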

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case offsetof(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case offsetof(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size, size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}

static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       FIELD_SIZEOF(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	case offsetof(struct bpf_sysctl, file_pos):
		/* ppos is a pointer so it should be accessed via indirect
		 * loads and stores. Also for stores additional temporary
		 * register is used since neither src_reg nor dst_reg can be
		 * overridden.
		 */
		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(
				BPF_DW, si->dst_reg, treg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_STX_MEM(
				BPF_SIZEOF(u32), treg, si->src_reg, 0);
			*insn++ = BPF_LDX_MEM(
				BPF_DW, treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				si->dst_reg, si->src_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_LDX_MEM(
				BPF_SIZE(si->code), si->dst_reg, si->dst_reg, 0);
		}
		*target_size = sizeof(u32);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto		= sysctl_func_proto,
	.is_valid_access	= sysctl_is_valid_access,
	.convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};