	bpf: Disable GCC -fgcse optimization for ___bpf_prog_run()
On x86-64, with CONFIG_RETPOLINE=n, GCC's "global common subexpression
elimination" optimization results in ___bpf_prog_run()'s jumptable code
changing from this:

	select_insn:
		jmp *jumptable(, %rax, 8)
		...
	ALU64_ADD_X:
		...
		jmp *jumptable(, %rax, 8)
	ALU_ADD_X:
		...
		jmp *jumptable(, %rax, 8)

to this:

	select_insn:
		mov jumptable, %r12
		jmp *(%r12, %rax, 8)
		...
	ALU64_ADD_X:
		...
		jmp *(%r12, %rax, 8)
	ALU_ADD_X:
		...
		jmp *(%r12, %rax, 8)
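
For context, ___bpf_prog_run() dispatches instructions with GCC's
"labels as values" extension (computed gotos), as the BPF_INSN_*_LBL
macros in the diff below show.  A minimal sketch of that dispatch
pattern, using toy opcodes rather than the real interpreter:

	/* Each "goto *jumptable[...]" below compiles to one of the
	 * "jmp *jumptable(, %rax, 8)" sites above.  GCSE sees that the
	 * table's base address is common to all of the jump sites and
	 * hoists it into a callee-saved register at function entry. */
	static unsigned long run(const unsigned char *insn, unsigned long acc)
	{
		static const void *jumptable[] = {
			[0] = &&op_halt,
			[1] = &&op_inc,
			[2] = &&op_dec,
		};

		goto *jumptable[*insn];		/* select_insn */
	op_inc:
		acc++;
		insn++;
		goto *jumptable[*insn];
	op_dec:
		acc--;
		insn++;
		goto *jumptable[*insn];
	op_halt:
		return acc;
	}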

The jumptable address is placed in a register once, at the beginning of
the function.  The function execution can then go through multiple
indirect jumps which rely on that same register value.  This has a few
issues:

1) Objtool isn't smart enough to track such a register value across
   multiple recursive indirect jumps through the jump table.

2) With CONFIG_RETPOLINE=n, this optimization actually results in a
   small slowdown.  I measured a ~4.7% slowdown in the test_bpf
   "tcpdump port 22" selftest.

   This slowdown is actually predicted by the GCC manual:

     Note: When compiling a program using computed gotos, a GCC
     extension, you may get better run-time performance if you
     disable the global common subexpression elimination pass by
     adding -fno-gcse to the command line.

So just disable the optimization for this function.
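
The fix uses GCC's function-level optimize attribute, which overrides
the command-line optimization flags for a single function.  As a
standalone sketch (the function name is made up, and CONFIG_CC_IS_GCC
is a stand-in guard; the actual patch below defines __no_fgcse in
compiler-gcc.h so that only real GCC sees the attribute, with an empty
fallback in compiler_types.h):

	/* GCC only: clang ignores optimize() attributes, so other
	 * compilers get an empty definition of __no_fgcse. */
	#ifdef CONFIG_CC_IS_GCC
	# define __no_fgcse __attribute__((optimize("-fno-gcse")))
	#else
	# define __no_fgcse
	#endif

	static u64 __no_fgcse interp(u64 *regs, const u8 *insn, u64 *stack)
	{
		/* ... computed-goto dispatch as sketched above ... */
		return 0;
	}
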
Fixes: e55a73251d ("bpf: Fix ORC unwinding in non-JIT BPF code")
Reported-by: Randy Dunlap <rdunlap@infradead.org>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/30c3ca29ba037afcbd860a8672eef0021addf9fe.1563413318.git.jpoimboe@redhat.com
			
			
parent 82e844a653
commit 3193c0836f

3 changed files with 7 additions and 1 deletion
include/linux/compiler-gcc.h
@@ -170,3 +170,5 @@
 #else
 #define __diag_GCC_8(s)
 #endif
+
+#define __no_fgcse __attribute__((optimize("-fno-gcse")))

include/linux/compiler_types.h
@@ -189,6 +189,10 @@ struct ftrace_likely_data {
 #define asm_volatile_goto(x...) asm goto(x)
 #endif
 
+#ifndef __no_fgcse
+# define __no_fgcse
+#endif
+
 /* Are two types/vars the same type (ignoring qualifiers)? */
 #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
 

kernel/bpf/core.c
@@ -1295,7 +1295,7 @@ bool bpf_opcode_in_insntable(u8 code)
  *
  * Decode and execute eBPF instructions.
  */
-static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
+static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 {
 #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z