forked from mirrors/linux
		
	MIPS: BPF: Restore MIPS32 cBPF JIT
Commit 716850ab10 ("MIPS: eBPF: Initial eBPF support for MIPS32 architecture.") enabled our eBPF JIT for MIPS32 kernels, whereas it has previously only been available for MIPS64. It was my understanding at the time that the BPF test suite was passing & JITing a comparable number of tests to our cBPF JIT [1], but it turns out that was not the case. The eBPF JIT has a number of problems on MIPS32: - Most notably various code paths still result in emission of MIPS64 instructions which will cause reserved instruction exceptions & kernel panics when run on MIPS32 CPUs. - The eBPF JIT doesn't account for differences between the O32 ABI used by MIPS32 kernels versus the N64 ABI used by MIPS64 kernels. Notably arguments beyond the first 4 are passed on the stack in O32, and this is entirely unhandled when JITing a BPF_CALL instruction. Stack space must be reserved for arguments even if they all fit in registers, and the callee is free to assume that stack space has been reserved for its use - with the eBPF JIT this is not the case, so calling any function can result in clobbering values on the stack & unpredictable behaviour. Function arguments in eBPF are always 64-bit values which is also entirely unhandled - the JIT still uses a single (32-bit) register per argument. As a result all function arguments are always passed incorrectly when JITing a BPF_CALL instruction, leading to kernel crashes or strange behavior. - The JIT attempts to bail out on use of ALU64 instructions or 64-bit memory access instructions. The code doing this at the start of build_one_insn() incorrectly checks whether BPF_OP() equals BPF_DW, when it should really be checking BPF_SIZE() & only doing so when BPF_CLASS() is one of BPF_{LD,LDX,ST,STX}. This results in false positives that cause more bailouts than intended, and that in turn hides some of the problems described above. 
- The kernel's cBPF->eBPF translation makes heavy use of 64-bit eBPF instructions that the MIPS32 eBPF JIT bails out on, leading to most cBPF programs not being JITed at all. Until these problems are resolved, revert the removal of the cBPF JIT performed by commit 716850ab10 ("MIPS: eBPF: Initial eBPF support for MIPS32 architecture."). Together with commit f8fffebdea ("MIPS: BPF: Disable MIPS32 eBPF JIT") this restores MIPS32 BPF JIT behavior back to the same state it was prior to the introduction of the broken eBPF JIT support. [1] https://lore.kernel.org/linux-mips/MWHPR2201MB13583388481F01A422CE7D66D4410@MWHPR2201MB1358.namprd22.prod.outlook.com/ Signed-off-by: Paul Burton <paulburton@kernel.org> Fixes: 716850ab10 ("MIPS: eBPF: Initial eBPF support for MIPS32 architecture.") Cc: Daniel Borkmann <daniel@iogearbox.net> Cc: Hassan Naveed <hnaveed@wavecomp.com> Cc: Tony Ambardar <itugrok@yahoo.com> Cc: bpf@vger.kernel.org Cc: netdev@vger.kernel.org Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org
This commit is contained in:
		
							parent
							
								
									11d06df7b9
								
							
						
					
					
						commit
						36366e367e
					
				
					 4 changed files with 1557 additions and 0 deletions
				
			
		| 
						 | 
				
			
			@ -47,6 +47,7 @@ config MIPS
 | 
			
		|||
	select HAVE_ARCH_TRACEHOOK
 | 
			
		||||
	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES
 | 
			
		||||
	select HAVE_ASM_MODVERSIONS
 | 
			
		||||
	select HAVE_CBPF_JIT if !64BIT && !CPU_MICROMIPS
 | 
			
		||||
	select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2
 | 
			
		||||
	select HAVE_CONTEXT_TRACKING
 | 
			
		||||
	select HAVE_COPY_THREAD_TLS
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1,4 +1,5 @@
 | 
			
		|||
# SPDX-License-Identifier: GPL-2.0-only
 | 
			
		||||
# MIPS networking code
 | 
			
		||||
 | 
			
		||||
obj-$(CONFIG_MIPS_CBPF_JIT) += bpf_jit.o bpf_jit_asm.o
 | 
			
		||||
obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
							
								
								
									
										1270
									
								
								arch/mips/net/bpf_jit.c
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										1270
									
								
								arch/mips/net/bpf_jit.c
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load diff
											
										
									
								
							
							
								
								
									
										285
									
								
								arch/mips/net/bpf_jit_asm.S
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										285
									
								
								arch/mips/net/bpf_jit_asm.S
									
									
									
									
									
										Normal file
									
								
							| 
						 | 
				
			
			@ -0,0 +1,285 @@
 | 
			
		|||
/*
 | 
			
		||||
 * bpf_jit_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF
 | 
			
		||||
 * compiler.
 | 
			
		||||
 *
 | 
			
		||||
 * Copyright (C) 2015 Imagination Technologies Ltd.
 | 
			
		||||
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 | 
			
		||||
 *
 | 
			
		||||
 * This program is free software; you can redistribute it and/or modify it
 | 
			
		||||
 * under the terms of the GNU General Public License as published by the
 | 
			
		||||
 * Free Software Foundation; version 2 of the License.
 | 
			
		||||
 */
 | 
			
		||||
 | 
			
		||||
#include <asm/asm.h>
 | 
			
		||||
#include <asm/isa-rev.h>
 | 
			
		||||
#include <asm/regdef.h>
 | 
			
		||||
#include "bpf_jit.h"
 | 
			
		||||
 | 
			
		||||
/* ABI
 | 
			
		||||
 *
 | 
			
		||||
 * r_skb_hl	skb header length
 | 
			
		||||
 * r_skb_data	skb data
 | 
			
		||||
 * r_off(a1)	offset register
 | 
			
		||||
 * r_A		BPF register A
 | 
			
		||||
 * r_X		BPF register X
 | 
			
		||||
 * r_skb(a0)	*skb
 | 
			
		||||
 * r_M		*scratch memory
 | 
			
		||||
 * r_skb_len	skb length
 | 
			
		||||
 * r_s0		Scratch register 0
 | 
			
		||||
 * r_s1		Scratch register 1
 | 
			
		||||
 *
 | 
			
		||||
 * On entry:
 | 
			
		||||
 * a0: *skb
 | 
			
		||||
 * a1: offset (imm or imm + X)
 | 
			
		||||
 *
 | 
			
		||||
 * All non-BPF-ABI registers are free for use. On return, we only
 | 
			
		||||
 * care about r_ret. The BPF-ABI registers are assumed to remain
 | 
			
		||||
 * unmodified during the entire filter operation.
 | 
			
		||||
 */
 | 
			
		||||
 | 
			
		||||
#define skb	a0
 | 
			
		||||
#define offset	a1
 | 
			
		||||
#define SKF_LL_OFF  (-0x200000) /* Can't include linux/filter.h in assembly */
 | 
			
		||||
 | 
			
		||||
	/* We know better :) so prevent assembler reordering etc */
 | 
			
		||||
	.set 	noreorder
 | 
			
		||||
 | 
			
		||||
#define is_offset_negative(TYPE)				\
 | 
			
		||||
	/* If offset is negative we have more work to do */	\
 | 
			
		||||
	slti	t0, offset, 0;					\
 | 
			
		||||
	bgtz	t0, bpf_slow_path_##TYPE##_neg;			\
 | 
			
		||||
	/* Be careful what follows in DS. */
 | 
			
		||||
 | 
			
		||||
#define is_offset_in_header(SIZE, TYPE)				\
 | 
			
		||||
	/* Reading from header? */				\
 | 
			
		||||
	addiu	$r_s0, $r_skb_hl, -SIZE;			\
 | 
			
		||||
	slt	t0, $r_s0, offset;				\
 | 
			
		||||
	bgtz	t0, bpf_slow_path_##TYPE;			\
 | 
			
		||||
 | 
			
		||||
LEAF(sk_load_word)
 | 
			
		||||
	is_offset_negative(word)
 | 
			
		||||
FEXPORT(sk_load_word_positive)
 | 
			
		||||
	is_offset_in_header(4, word)
 | 
			
		||||
	/* Offset within header boundaries */
 | 
			
		||||
	PTR_ADDU t1, $r_skb_data, offset
 | 
			
		||||
	.set	reorder
 | 
			
		||||
	lw	$r_A, 0(t1)
 | 
			
		||||
	.set	noreorder
 | 
			
		||||
#ifdef CONFIG_CPU_LITTLE_ENDIAN
 | 
			
		||||
# if MIPS_ISA_REV >= 2
 | 
			
		||||
	wsbh	t0, $r_A
 | 
			
		||||
	rotr	$r_A, t0, 16
 | 
			
		||||
# else
 | 
			
		||||
	sll	t0, $r_A, 24
 | 
			
		||||
	srl	t1, $r_A, 24
 | 
			
		||||
	srl	t2, $r_A, 8
 | 
			
		||||
	or	t0, t0, t1
 | 
			
		||||
	andi	t2, t2, 0xff00
 | 
			
		||||
	andi	t1, $r_A, 0xff00
 | 
			
		||||
	or	t0, t0, t2
 | 
			
		||||
	sll	t1, t1, 8
 | 
			
		||||
	or	$r_A, t0, t1
 | 
			
		||||
# endif
 | 
			
		||||
#endif
 | 
			
		||||
	jr	$r_ra
 | 
			
		||||
	 move	$r_ret, zero
 | 
			
		||||
	END(sk_load_word)
 | 
			
		||||
 | 
			
		||||
LEAF(sk_load_half)
 | 
			
		||||
	is_offset_negative(half)
 | 
			
		||||
FEXPORT(sk_load_half_positive)
 | 
			
		||||
	is_offset_in_header(2, half)
 | 
			
		||||
	/* Offset within header boundaries */
 | 
			
		||||
	PTR_ADDU t1, $r_skb_data, offset
 | 
			
		||||
	lhu	$r_A, 0(t1)
 | 
			
		||||
#ifdef CONFIG_CPU_LITTLE_ENDIAN
 | 
			
		||||
# if MIPS_ISA_REV >= 2
 | 
			
		||||
	wsbh	$r_A, $r_A
 | 
			
		||||
# else
 | 
			
		||||
	sll	t0, $r_A, 8
 | 
			
		||||
	srl	t1, $r_A, 8
 | 
			
		||||
	andi	t0, t0, 0xff00
 | 
			
		||||
	or	$r_A, t0, t1
 | 
			
		||||
# endif
 | 
			
		||||
#endif
 | 
			
		||||
	jr	$r_ra
 | 
			
		||||
	 move	$r_ret, zero
 | 
			
		||||
	END(sk_load_half)
 | 
			
		||||
 | 
			
		||||
LEAF(sk_load_byte)
 | 
			
		||||
	is_offset_negative(byte)
 | 
			
		||||
FEXPORT(sk_load_byte_positive)
 | 
			
		||||
	is_offset_in_header(1, byte)
 | 
			
		||||
	/* Offset within header boundaries */
 | 
			
		||||
	PTR_ADDU t1, $r_skb_data, offset
 | 
			
		||||
	lbu	$r_A, 0(t1)
 | 
			
		||||
	jr	$r_ra
 | 
			
		||||
	 move	$r_ret, zero
 | 
			
		||||
	END(sk_load_byte)
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
 * call skb_copy_bits:
 | 
			
		||||
 * (prototype in linux/skbuff.h)
 | 
			
		||||
 *
 | 
			
		||||
 * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len)
 | 
			
		||||
 *
 | 
			
		||||
 * o32 mandates we leave 4 spaces for argument registers in case
 | 
			
		||||
 * the callee needs to use them. Even though we don't care about
 | 
			
		||||
 * the argument registers ourselves, we need to allocate that space
 | 
			
		||||
 * to remain ABI compliant since the callee may want to use that space.
 | 
			
		||||
 * We also allocate 2 more spaces for $r_ra and our return register (*to).
 | 
			
		||||
 *
 | 
			
		||||
 * n64 is a bit different. The *caller* will allocate the space to preserve
 | 
			
		||||
 * the arguments. So in 64-bit kernels, we allocate the 4-arg space for no
 | 
			
		||||
 * good reason but it does not matter that much really.
 | 
			
		||||
 *
 | 
			
		||||
 * (void *to) is returned in r_s0
 | 
			
		||||
 *
 | 
			
		||||
 */
 | 
			
		||||
#ifdef CONFIG_CPU_LITTLE_ENDIAN
 | 
			
		||||
#define DS_OFFSET(SIZE) (4 * SZREG)
 | 
			
		||||
#else
 | 
			
		||||
#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE))
 | 
			
		||||
#endif
 | 
			
		||||
#define bpf_slow_path_common(SIZE)				\
 | 
			
		||||
	/* Quick check. Are we within reasonable boundaries? */ \
 | 
			
		||||
	LONG_ADDIU	$r_s1, $r_skb_len, -SIZE;		\
 | 
			
		||||
	sltu		$r_s0, offset, $r_s1;			\
 | 
			
		||||
	beqz		$r_s0, fault;				\
 | 
			
		||||
	/* Load 4th argument in DS */				\
 | 
			
		||||
	 LONG_ADDIU	a3, zero, SIZE;				\
 | 
			
		||||
	PTR_ADDIU	$r_sp, $r_sp, -(6 * SZREG);		\
 | 
			
		||||
	PTR_LA		t0, skb_copy_bits;			\
 | 
			
		||||
	PTR_S		$r_ra, (5 * SZREG)($r_sp);		\
 | 
			
		||||
	/* Assign low slot to a2 */				\
 | 
			
		||||
	PTR_ADDIU	a2, $r_sp, DS_OFFSET(SIZE);		\
 | 
			
		||||
	jalr		t0;					\
 | 
			
		||||
	/* Reset our destination slot (DS but it's ok) */	\
 | 
			
		||||
	 INT_S		zero, (4 * SZREG)($r_sp);		\
 | 
			
		||||
	/*							\
 | 
			
		||||
	 * skb_copy_bits returns 0 on success and -EFAULT	\
 | 
			
		||||
	 * on error. Our data live in a2. Do not bother with	\
 | 
			
		||||
	 * our data if an error has been returned.		\
 | 
			
		||||
	 */							\
 | 
			
		||||
	/* Restore our frame */					\
 | 
			
		||||
	PTR_L		$r_ra, (5 * SZREG)($r_sp);		\
 | 
			
		||||
	INT_L		$r_s0, (4 * SZREG)($r_sp);		\
 | 
			
		||||
	bltz		v0, fault;				\
 | 
			
		||||
	 PTR_ADDIU	$r_sp, $r_sp, 6 * SZREG;		\
 | 
			
		||||
	move		$r_ret, zero;				\
 | 
			
		||||
 | 
			
		||||
NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
 | 
			
		||||
	bpf_slow_path_common(4)
 | 
			
		||||
#ifdef CONFIG_CPU_LITTLE_ENDIAN
 | 
			
		||||
# if MIPS_ISA_REV >= 2
 | 
			
		||||
	wsbh	t0, $r_s0
 | 
			
		||||
	jr	$r_ra
 | 
			
		||||
	 rotr	$r_A, t0, 16
 | 
			
		||||
# else
 | 
			
		||||
	sll	t0, $r_s0, 24
 | 
			
		||||
	srl	t1, $r_s0, 24
 | 
			
		||||
	srl	t2, $r_s0, 8
 | 
			
		||||
	or	t0, t0, t1
 | 
			
		||||
	andi	t2, t2, 0xff00
 | 
			
		||||
	andi	t1, $r_s0, 0xff00
 | 
			
		||||
	or	t0, t0, t2
 | 
			
		||||
	sll	t1, t1, 8
 | 
			
		||||
	jr	$r_ra
 | 
			
		||||
	 or	$r_A, t0, t1
 | 
			
		||||
# endif
 | 
			
		||||
#else
 | 
			
		||||
	jr	$r_ra
 | 
			
		||||
	 move	$r_A, $r_s0
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
	END(bpf_slow_path_word)
 | 
			
		||||
 | 
			
		||||
NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
 | 
			
		||||
	bpf_slow_path_common(2)
 | 
			
		||||
#ifdef CONFIG_CPU_LITTLE_ENDIAN
 | 
			
		||||
# if MIPS_ISA_REV >= 2
 | 
			
		||||
	jr	$r_ra
 | 
			
		||||
	 wsbh	$r_A, $r_s0
 | 
			
		||||
# else
 | 
			
		||||
	sll	t0, $r_s0, 8
 | 
			
		||||
	andi	t1, $r_s0, 0xff00
 | 
			
		||||
	andi	t0, t0, 0xff00
 | 
			
		||||
	srl	t1, t1, 8
 | 
			
		||||
	jr	$r_ra
 | 
			
		||||
	 or	$r_A, t0, t1
 | 
			
		||||
# endif
 | 
			
		||||
#else
 | 
			
		||||
	jr	$r_ra
 | 
			
		||||
	 move	$r_A, $r_s0
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
	END(bpf_slow_path_half)
 | 
			
		||||
 | 
			
		||||
NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp)
 | 
			
		||||
	bpf_slow_path_common(1)
 | 
			
		||||
	jr	$r_ra
 | 
			
		||||
	 move	$r_A, $r_s0
 | 
			
		||||
 | 
			
		||||
	END(bpf_slow_path_byte)
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
 * Negative entry points
 | 
			
		||||
 */
 | 
			
		||||
	.macro bpf_is_end_of_data
 | 
			
		||||
	li	t0, SKF_LL_OFF
 | 
			
		||||
	/* Reading link layer data? */
 | 
			
		||||
	slt	t1, offset, t0
 | 
			
		||||
	bgtz	t1, fault
 | 
			
		||||
	/* Be careful what follows in DS. */
 | 
			
		||||
	.endm
 | 
			
		||||
/*
 | 
			
		||||
 * call skb_copy_bits:
 | 
			
		||||
 * (prototype in linux/filter.h)
 | 
			
		||||
 *
 | 
			
		||||
 * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
 | 
			
		||||
 *                                            int k, unsigned int size)
 | 
			
		||||
 *
 | 
			
		||||
 * see above (bpf_slow_path_common) for ABI restrictions
 | 
			
		||||
 */
 | 
			
		||||
#define bpf_negative_common(SIZE)					\
 | 
			
		||||
	PTR_ADDIU	$r_sp, $r_sp, -(6 * SZREG);			\
 | 
			
		||||
	PTR_LA		t0, bpf_internal_load_pointer_neg_helper;	\
 | 
			
		||||
	PTR_S		$r_ra, (5 * SZREG)($r_sp);			\
 | 
			
		||||
	jalr		t0;						\
 | 
			
		||||
	 li		a2, SIZE;					\
 | 
			
		||||
	PTR_L		$r_ra, (5 * SZREG)($r_sp);			\
 | 
			
		||||
	/* Check return pointer */					\
 | 
			
		||||
	beqz		v0, fault;					\
 | 
			
		||||
	 PTR_ADDIU	$r_sp, $r_sp, 6 * SZREG;			\
 | 
			
		||||
	/* Preserve our pointer */					\
 | 
			
		||||
	move		$r_s0, v0;					\
 | 
			
		||||
	/* Set return value */						\
 | 
			
		||||
	move		$r_ret, zero;					\
 | 
			
		||||
 | 
			
		||||
bpf_slow_path_word_neg:
 | 
			
		||||
	bpf_is_end_of_data
 | 
			
		||||
NESTED(sk_load_word_negative, (6 * SZREG), $r_sp)
 | 
			
		||||
	bpf_negative_common(4)
 | 
			
		||||
	jr	$r_ra
 | 
			
		||||
	 lw	$r_A, 0($r_s0)
 | 
			
		||||
	END(sk_load_word_negative)
 | 
			
		||||
 | 
			
		||||
bpf_slow_path_half_neg:
 | 
			
		||||
	bpf_is_end_of_data
 | 
			
		||||
NESTED(sk_load_half_negative, (6 * SZREG), $r_sp)
 | 
			
		||||
	bpf_negative_common(2)
 | 
			
		||||
	jr	$r_ra
 | 
			
		||||
	 lhu	$r_A, 0($r_s0)
 | 
			
		||||
	END(sk_load_half_negative)
 | 
			
		||||
 | 
			
		||||
bpf_slow_path_byte_neg:
 | 
			
		||||
	bpf_is_end_of_data
 | 
			
		||||
NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp)
 | 
			
		||||
	bpf_negative_common(1)
 | 
			
		||||
	jr	$r_ra
 | 
			
		||||
	 lbu	$r_A, 0($r_s0)
 | 
			
		||||
	END(sk_load_byte_negative)
 | 
			
		||||
 | 
			
		||||
fault:
 | 
			
		||||
	jr	$r_ra
 | 
			
		||||
	 addiu $r_ret, zero, 1
 | 
			
		||||
		Loading…
	
		Reference in a new issue