Mirror of https://github.com/torvalds/linux.git

	bpf: Fix array bounds error with may_goto
may_goto uses an additional 8 bytes on the stack, which causes the
index into the interpreters[] array, computed from stack_size, to go
out of bounds.
1. If a BPF program is rewritten, re-evaluate the stack size. For non-JIT
cases, reject loading directly.
2. For non-JIT cases, calculating interpreters[idx] may still cause an
out-of-bounds array access; just warn about it.
3. For jit_requested cases, executing bpf_func must also trigger a
warning, so move the definition of __bpf_prog_ret0_warn outside the
CONFIG_BPF_JIT_ALWAYS_ON block.
Reported-by: syzbot+d2a2c639d03ac200a4f1@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/bpf/0000000000000f823606139faa5d@google.com/
Fixes: 011832b97b ("bpf: Introduce may_goto instruction")
Signed-off-by: Jiayuan Chen <mrpre@163.com>
Link: https://lore.kernel.org/r/20250214091823.46042-2-mrpre@163.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
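
As an illustration of the arithmetic behind the bug: interpreters[] has
one handler per 32-byte stack-size step up to MAX_BPF_STACK (512 bytes),
i.e. 16 entries, and bpf_prog_select_func() indexes it with
(round_up(stack_depth, 32) / 32) - 1. The following minimal user-space
sketch (not kernel code; ROUND_UP, NR_INTERPRETERS and main() are
stand-ins made up for this example) shows how the extra 8 bytes used by
may_goto push that index one slot past the end of the table:

/* Sketch only: mirrors the index computation in bpf_prog_select_func(). */
#include <stdio.h>

#define MAX_BPF_STACK	512
#define NR_INTERPRETERS	16u	/* one handler per 32-byte step up to 512 */
#define ROUND_UP(x, a)	((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	unsigned int stack_depth = MAX_BPF_STACK + 8;	/* may_goto adds 8 bytes */
	unsigned int idx = ROUND_UP(stack_depth, 32) / 32 - 1;

	printf("stack_depth=%u -> idx=%u (valid indexes are 0..%u)\n",
	       stack_depth, idx, NR_INTERPRETERS - 1);
	/* Prints idx=16, i.e. one past the last interpreters[] entry. */
	return 0;
}

The patch below guards exactly this index (falling back to
__bpf_prog_ret0_warn when it would overflow) and, when no JIT is
requested, makes the verifier reject such programs up front.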
			
			
parent a4585442ad
commit 6ebc5030e0
2 changed files with 22 additions and 4 deletions
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2290,17 +2290,18 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
 	insn->code = BPF_JMP | BPF_CALL_ARGS;
 }
 #endif
-#else
+#endif
+
 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
 					 const struct bpf_insn *insn)
 {
 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
-	 * is not working properly, so warn about it!
+	 * is not working properly, or interpreter is being used when
+	 * prog->jit_requested is not 0, so warn about it!
 	 */
 	WARN_ON_ONCE(1);
 	return 0;
 }
-#endif
 
 bool bpf_prog_map_compatible(struct bpf_map *map,
 			     const struct bpf_prog *fp)
@@ -2380,8 +2381,18 @@ static void bpf_prog_select_func(struct bpf_prog *fp)
 {
 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+	u32 idx = (round_up(stack_depth, 32) / 32) - 1;
 
-	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
+	/* may_goto may cause stack size > 512, leading to idx out-of-bounds.
+	 * But for non-JITed programs, we don't need bpf_func, so no bounds
+	 * check needed.
+	 */
+	if (!fp->jit_requested &&
+	    !WARN_ON_ONCE(idx >= ARRAY_SIZE(interpreters))) {
+		fp->bpf_func = interpreters[idx];
+	} else {
+		fp->bpf_func = __bpf_prog_ret0_warn;
+	}
 #else
 	fp->bpf_func = __bpf_prog_ret0_warn;
 #endif
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -21903,6 +21903,13 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 		if (subprogs[cur_subprog + 1].start == i + delta + 1) {
 			subprogs[cur_subprog].stack_depth += stack_depth_extra;
 			subprogs[cur_subprog].stack_extra = stack_depth_extra;
+
+			stack_depth = subprogs[cur_subprog].stack_depth;
+			if (stack_depth > MAX_BPF_STACK && !prog->jit_requested) {
+				verbose(env, "stack size %d(extra %d) is too large\n",
+					stack_depth, stack_depth_extra);
+				return -EINVAL;
+			}
 			cur_subprog++;
 			stack_depth = subprogs[cur_subprog].stack_depth;
 			stack_depth_extra = 0;