bpf: Make BPF_PROG_RUN_ARRAY return -err instead of allow boolean

Right now BPF_PROG_RUN_ARRAY and related macros return 1 or 0 to indicate
whether the prog array allows or rejects whatever is being hooked. The
callers of these macros then return -EPERM or continue processing based on
the macro's return value. Unfortunately this is inflexible, since -EPERM is
the only error that can be returned.

This patch should be a no-op; it prepares for the next patch. The return of
-EPERM is moved inside the macros, so the outer functions directly return
whatever the macros returned when it is non-zero.

Signed-off-by: YiFei Zhu <zhuyifei@google.com>
Reviewed-by: Stanislav Fomichev <sdf@google.com>
Link: https://lore.kernel.org/r/788abcdca55886d1f43274c918eaa9f792a9f33b.1639619851.git.zhuyifei@google.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:

parent d81283d272
commit f10d059661

3 changed files with 25 additions and 34 deletions
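To illustrate the change in calling convention, here is a minimal before/after sketch drawn from the __cgroup_bpf_run_filter_sk hunk in the diff below (not additional patch content): callers used to translate the macro's 1/0 "allow" result into 0/-EPERM themselves; after this patch the macro returns 0 or -EPERM itself, so the wrapper simply propagates the value (and a later patch can return other errno values without touching every caller).

	/* Before: macro returns 1 (allow) / 0 (reject); caller maps it to an errno. */
	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sk, bpf_prog_run);
	return ret == 1 ? 0 : -EPERM;

	/* After: macro returns 0 on success or -EPERM; caller just propagates it. */
	return BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sk, bpf_prog_run);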
				
			
include/linux/bpf.h

@@ -1277,7 +1277,7 @@ static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
 
 typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
 
-static __always_inline u32
+static __always_inline int
 BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
 			    const void *ctx, bpf_prog_run_fn run_prog,
 			    u32 *ret_flags)
@@ -1287,7 +1287,7 @@ BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
 	const struct bpf_prog_array *array;
 	struct bpf_run_ctx *old_run_ctx;
 	struct bpf_cg_run_ctx run_ctx;
-	u32 ret = 1;
+	int ret = 0;
 	u32 func_ret;
 
 	migrate_disable();
@@ -1298,7 +1298,8 @@ BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
 	while ((prog = READ_ONCE(item->prog))) {
 		run_ctx.prog_item = item;
 		func_ret = run_prog(prog, ctx);
-		ret &= (func_ret & 1);
+		if (!(func_ret & 1))
+			ret = -EPERM;
 		*(ret_flags) |= (func_ret >> 1);
 		item++;
 	}
@@ -1308,7 +1309,7 @@ BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
 	return ret;
 }
 
-static __always_inline u32
+static __always_inline int
 BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
 		      const void *ctx, bpf_prog_run_fn run_prog)
 {
@@ -1317,7 +1318,7 @@ BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
 	const struct bpf_prog_array *array;
 	struct bpf_run_ctx *old_run_ctx;
 	struct bpf_cg_run_ctx run_ctx;
-	u32 ret = 1;
+	int ret = 0;
 
 	migrate_disable();
 	rcu_read_lock();
@@ -1326,7 +1327,8 @@ BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
 	while ((prog = READ_ONCE(item->prog))) {
 		run_ctx.prog_item = item;
-		ret &= run_prog(prog, ctx);
+		if (!run_prog(prog, ctx))
+			ret = -EPERM;
 		item++;
 	}
 	bpf_reset_run_ctx(old_run_ctx);
@@ -1394,7 +1396,7 @@ BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu,
 		u32 _ret;				\
 		_ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, &_flags); \
 		_cn = _flags & BPF_RET_SET_CN;		\
-		if (_ret)				\
+		if (!_ret)				\
 			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
 		else					\
 			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
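For readers unfamiliar with the flags variant: as the BPF_PROG_RUN_ARRAY_CG_FLAGS body above shows, each program's u32 return value packs the allow/deny verdict in bit 0 and auxiliary flags (such as BPF_RET_SET_CN used by the egress macro) in the upper bits. A brief decoding sketch, not part of the patch:

	/* Sketch (not in the patch): how a program's return value is decoded. */
	u32 func_ret = run_prog(prog, ctx);
	bool allowed = func_ret & 1;	/* bit 0: verdict; 0 now yields ret = -EPERM */
	u32 flags = func_ret >> 1;	/* remaining bits, e.g. BPF_RET_SET_CN */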
kernel/bpf/cgroup.c

@@ -1080,7 +1080,6 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
 	} else {
 		ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], skb,
 					    __bpf_prog_run_save_cb);
-		ret = (ret == 1 ? 0 : -EPERM);
 	}
 	bpf_restore_data_end(skb, saved_data_end);
 	__skb_pull(skb, offset);
@@ -1107,10 +1106,9 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
 			       enum cgroup_bpf_attach_type atype)
 {
 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
-	int ret;
 
-	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sk, bpf_prog_run);
-	return ret == 1 ? 0 : -EPERM;
+	return BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sk,
+				     bpf_prog_run);
 }
 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
 
@@ -1142,7 +1140,6 @@ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
 	};
 	struct sockaddr_storage unspec;
 	struct cgroup *cgrp;
-	int ret;
 
 	/* Check socket family since not all sockets represent network
 	 * endpoint (e.g. AF_UNIX).
@@ -1156,10 +1153,8 @@ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
 	}
 
 	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
-	ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(cgrp->bpf.effective[atype], &ctx,
-				          bpf_prog_run, flags);
-
-	return ret == 1 ? 0 : -EPERM;
+	return BPF_PROG_RUN_ARRAY_CG_FLAGS(cgrp->bpf.effective[atype], &ctx,
+					   bpf_prog_run, flags);
 }
 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
 
@@ -1184,11 +1179,9 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
 				     enum cgroup_bpf_attach_type atype)
 {
 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
-	int ret;
 
-	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sock_ops,
-				    bpf_prog_run);
-	return ret == 1 ? 0 : -EPERM;
+	return BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sock_ops,
+				     bpf_prog_run);
 }
 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
 
@@ -1201,15 +1194,15 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
 		.major = major,
 		.minor = minor,
 	};
-	int allow;
+	int ret;
 
 	rcu_read_lock();
 	cgrp = task_dfl_cgroup(current);
-	allow = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx,
-				      bpf_prog_run);
+	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx,
+				    bpf_prog_run);
 	rcu_read_unlock();
 
-	return !allow;
+	return ret;
 }
 
 static const struct bpf_func_proto *
@@ -1350,7 +1343,7 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
 		kfree(ctx.new_val);
 	}
 
-	return ret == 1 ? 0 : -EPERM;
+	return ret;
 }
 
 #ifdef CONFIG_NET
@@ -1455,10 +1448,8 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
 				    &ctx, bpf_prog_run);
 	release_sock(sk);
 
-	if (!ret) {
-		ret = -EPERM;
+	if (ret)
 		goto out;
-	}
 
 	if (ctx.optlen == -1) {
 		/* optlen set to -1, bypass kernel */
@@ -1565,10 +1556,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
 				    &ctx, bpf_prog_run);
 	release_sock(sk);
 
-	if (!ret) {
-		ret = -EPERM;
+	if (ret)
 		goto out;
-	}
 
 	if (ctx.optlen > max_optlen || ctx.optlen < 0) {
 		ret = -EFAULT;
@@ -1624,8 +1613,8 @@ int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
 
 	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT],
 				    &ctx, bpf_prog_run);
-	if (!ret)
-		return -EPERM;
+	if (ret)
+		return ret;
 
 	if (ctx.optlen > *optlen)
 		return -EFAULT;
security/device_cgroup.c

@@ -838,7 +838,7 @@ int devcgroup_check_permission(short type, u32 major, u32 minor, short access)
 	int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access);
 
 	if (rc)
-		return -EPERM;
+		return rc;
 
 	#ifdef CONFIG_CGROUP_DEVICE
 	return devcgroup_legacy_check_permission(type, major, minor, access);