mirror of https://github.com/torvalds/linux.git (synced 2025-10-31 16:48:26 +02:00)

	bpf: Zero former ARG_PTR_TO_{LONG,INT} args in case of error
For all non-tracing helpers which formerly had ARG_PTR_TO_{LONG,INT} as input
arguments, zero the value for the case of an error as otherwise it could leak
memory. For tracing, it is not needed given CAP_PERFMON can already read all
kernel memory anyway, hence bpf_get_func_arg() and bpf_get_func_ret() are
skipped here.
Also, the MTU helpers' mtu_len pointer value is not only written but also
read. Technically, the MEM_UNINIT should not be there in order to always
force init. Removing MEM_UNINIT needs more verifier rework though: MEM_UNINIT
currently implies two things: i) write into memory, ii) memory does not have
to be initialized. If we lift MEM_UNINIT, it then becomes: i) read from
memory, ii) memory must be initialized. This means that for bpf_*_check_mtu()
we would be re-adding the issue we are trying to fix, that is, it would then
be possible to write back into things like .rodata BPF maps. Follow-up work
will rework the MEM_UNINIT semantics such that the intent can be expressed
more precisely. For now, just clear *mtu_len on the error path; this can be
lifted again later.
Fixes: 8a67f2de9b ("bpf: expose bpf_strtol and bpf_strtoul to all program types")
Fixes: d7a4cb9b67 ("bpf: Introduce bpf_strtol and bpf_strtoul helpers")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/e5edd241-59e7-5e39-0ee5-a51e31b6840a@iogearbox.net
Link: https://lore.kernel.org/r/20240913191754.13290-5-daniel@iogearbox.net
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
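
As an illustration of the leak described above, below is a minimal,
hypothetical sketch of a program hitting the bpf_strtol() case. It is not
part of the patch: the section name, input string and policy logic are
assumptions for the example. The program hands the helper an uninitialized
stack slot and reads it back even though parsing fails; before this change
the early error return left *res untouched, so the branch below could
observe stale kernel stack bytes, whereas now it always sees 0.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical sketch only; section name, input string and policy logic
 * are assumptions for illustration, not part of this patch.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/sysctl")
int strtol_leak_demo(struct bpf_sysctl *ctx)
{
	char buf[] = "not-a-number";	/* parsing fails with -EINVAL */
	long res;	/* uninitialized on purpose; MEM_UNINIT allowed this */
	long err;

	err = bpf_strtol(buf, sizeof(buf) - 1, 0, &res);
	if (err < 0 && res != 0)
		return 0;	/* before the fix: reachable with stale stack bytes */

	return 1;		/* allow the sysctl access */
}

char _license[] SEC("license") = "GPL";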
			
			
This commit is contained in:
    parent 18752d73c1
    commit 4b3786a6c5

3 changed files with 26 additions and 21 deletions

--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -523,6 +523,7 @@ BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
 	long long _res;
 	int err;
 
+	*res = 0;
 	err = __bpf_strtoll(buf, buf_len, flags, &_res);
 	if (err < 0)
 		return err;
@@ -548,6 +549,7 @@ BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
 	bool is_negative;
 	int err;
 
+	*res = 0;
 	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
 	if (err < 0)
 		return err;
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -5934,6 +5934,7 @@ static const struct bpf_func_proto bpf_sys_close_proto = {
 
 BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
 {
+	*res = 0;
 	if (flags)
 		return -EINVAL;
 
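
The same pattern applies to bpf_kallsyms_lookup_name() above: the u64 result
slot may be uninitialized stack memory, and an early failure used to return
before writing it. Below is a rough BPF_PROG_TYPE_SYSCALL sketch, not part of
the patch; the symbol name is made up and the failure is forced with an
unsupported flags value:

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical sketch only; symbol name and flags value are chosen just to
 * force the early -EINVAL path.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("syscall")
int kallsyms_lookup_demo(void *ctx)
{
	char sym[] = "no_such_symbol_for_demo";
	__u64 addr;	/* uninitialized on purpose */
	long err;

	/* Non-zero flags make the helper bail out with -EINVAL; before this
	 * fix that path returned without ever writing the result slot.
	 */
	err = bpf_kallsyms_lookup_name(sym, sizeof(sym), 1, &addr);

	/* With this change, addr reads back as 0 whenever err < 0. */
	return err < 0 && addr == 0;
}

char _license[] SEC("license") = "GPL";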
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -6262,20 +6262,25 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
 	int ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
 	struct net_device *dev = skb->dev;
 	int skb_len, dev_len;
-	int mtu;
+	int mtu = 0;
 
-	if (unlikely(flags & ~(BPF_MTU_CHK_SEGS)))
-		return -EINVAL;
+	if (unlikely(flags & ~(BPF_MTU_CHK_SEGS))) {
+		ret = -EINVAL;
+		goto out;
+	}
 
-	if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len)))
-		return -EINVAL;
+	if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len))) {
+		ret = -EINVAL;
+		goto out;
+	}
 
 	dev = __dev_via_ifindex(dev, ifindex);
-	if (unlikely(!dev))
-		return -ENODEV;
+	if (unlikely(!dev)) {
+		ret = -ENODEV;
+		goto out;
+	}
 
 	mtu = READ_ONCE(dev->mtu);
-
 	dev_len = mtu + dev->hard_header_len;
 
 	/* If set use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */
@@ -6293,15 +6298,12 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
 	 */
 	if (skb_is_gso(skb)) {
 		ret = BPF_MTU_CHK_RET_SUCCESS;
-
 		if (flags & BPF_MTU_CHK_SEGS &&
 		    !skb_gso_validate_network_len(skb, mtu))
 			ret = BPF_MTU_CHK_RET_SEGS_TOOBIG;
 	}
 out:
-	/* BPF verifier guarantees valid pointer */
 	*mtu_len = mtu;
-
 	return ret;
 }
 
@@ -6311,19 +6313,21 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
 	struct net_device *dev = xdp->rxq->dev;
 	int xdp_len = xdp->data_end - xdp->data;
 	int ret = BPF_MTU_CHK_RET_SUCCESS;
-	int mtu, dev_len;
+	int mtu = 0, dev_len;
 
 	/* XDP variant doesn't support multi-buffer segment check (yet) */
-	if (unlikely(flags))
-		return -EINVAL;
+	if (unlikely(flags)) {
+		ret = -EINVAL;
+		goto out;
+	}
 
 	dev = __dev_via_ifindex(dev, ifindex);
-	if (unlikely(!dev))
-		return -ENODEV;
+	if (unlikely(!dev)) {
+		ret = -ENODEV;
+		goto out;
+	}
 
 	mtu = READ_ONCE(dev->mtu);
-
-	/* Add L2-header as dev MTU is L3 size */
 	dev_len = mtu + dev->hard_header_len;
 
 	/* Use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */
@@ -6333,10 +6337,8 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
 	xdp_len += len_diff; /* minus result pass check */
 	if (xdp_len > dev_len)
 		ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
-
-	/* BPF verifier guarantees valid pointer */
+out:
 	*mtu_len = mtu;
-
 	return ret;
 }
 
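
From the program side, the net/core/filter.c changes mean bpf_check_mtu()
now writes *mtu_len on every path, including the early error returns. A
hedged tc classifier sketch of the user-visible effect follows; it is not
part of the patch, and the section name and ifindex value are assumptions
(the ifindex is simply expected not to exist on the test system):

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical sketch only; the ifindex is assumed to be nonexistent. */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int check_mtu_demo(struct __sk_buff *skb)
{
	__u32 mtu_len;	/* uninitialized; former ARG_PTR_TO_INT | MEM_UNINIT */
	long ret;

	/* A bogus ifindex makes the helper fail with -ENODEV. */
	ret = bpf_check_mtu(skb, 0x7fffffff, &mtu_len, 0, 0);
	if (ret < 0) {
		/* Before the fix mtu_len could still hold stale stack data
		 * here; now the error path writes 0 as well.
		 */
		return mtu_len == 0 ? TC_ACT_OK : TC_ACT_SHOT;
	}

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";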