x86/retbleed: Add fine grained Kconfig knobs

Do fine-grained Kconfig for all the various retbleed parts.

NOTE: if your compiler doesn't support return thunks this will
silently 'upgrade' your mitigation to IBPB, you might not like this.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
commit f43b9876e8
parent 26aae8ccbc

19 changed files with 178 additions and 69 deletions
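The NOTE above refers to the auto-selection logic in retbleed_select_mitigation() further down: when CPU_UNRET_ENTRY cannot be enabled (for example because the compiler lacks -mfunction-return=thunk-extern, so RETHUNK and everything depending on it stays off), the AUTO path quietly picks IBPB instead. A minimal standalone sketch of that selection, not kernel code, using an IS_ENABLED() simplified from include/linux/kconfig.h; the file name and build line are only for the demo:

/*
 * Standalone illustration, not kernel code: how the new Kconfig knobs feed
 * the retbleed auto-selection.  IS_ENABLED() is simplified from
 * include/linux/kconfig.h.  Build with e.g.
 *     cc -DCONFIG_CPU_IBPB_ENTRY=1 retbleed_select.c && ./a.out
 */
#include <stdio.h>

#define __ARG_PLACEHOLDER_1			0,
#define __take_second_arg(__ignored, val, ...)	val
#define ____is_defined(arg1_or_junk)		__take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val)			____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x)				___is_defined(x)
#define IS_ENABLED(option)			__is_defined(option)

int main(void)
{
	const char *mitigation = "off";

	if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
		mitigation = "unret";
	else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY))
		mitigation = "ibpb";	/* the silent 'upgrade' */

	printf("retbleed auto selection: %s\n", mitigation);
	return 0;
}

Built with neither -D flag it prints "off"; with only CONFIG_CPU_IBPB_ENTRY defined it prints "ibpb", which is the upgrade the commit message warns about.
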
arch/x86/Kconfig (111 changed lines)
@@ -462,32 +462,6 @@ config GOLDFISH
 	def_bool y
 	depends on X86_GOLDFISH
 
-config RETPOLINE
-	bool "Avoid speculative indirect branches in kernel"
-	select OBJTOOL if HAVE_OBJTOOL
-	default y
-	help
-	  Compile kernel with the retpoline compiler options to guard against
-	  kernel-to-user data leaks by avoiding speculative indirect
-	  branches. Requires a compiler with -mindirect-branch=thunk-extern
-	  support for full protection. The kernel may run slower.
-
-config CC_HAS_SLS
-	def_bool $(cc-option,-mharden-sls=all)
-
-config CC_HAS_RETURN_THUNK
-	def_bool $(cc-option,-mfunction-return=thunk-extern)
-
-config SLS
-	bool "Mitigate Straight-Line-Speculation"
-	depends on CC_HAS_SLS && X86_64
-	select OBJTOOL if HAVE_OBJTOOL
-	default n
-	help
-	  Compile the kernel with straight-line-speculation options to guard
-	  against straight line speculation. The kernel image might be slightly
-	  larger.
-
 config X86_CPU_RESCTRL
 	bool "x86 CPU resource control support"
 	depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
@@ -2456,6 +2430,91 @@ source "kernel/livepatch/Kconfig"
 
 endmenu
 
+config CC_HAS_SLS
+	def_bool $(cc-option,-mharden-sls=all)
+
+config CC_HAS_RETURN_THUNK
+	def_bool $(cc-option,-mfunction-return=thunk-extern)
+
+menuconfig SPECULATION_MITIGATIONS
+	bool "Mitigations for speculative execution vulnerabilities"
+	default y
+	help
+	  Say Y here to enable options which enable mitigations for
+	  speculative execution hardware vulnerabilities.
+
+	  If you say N, all mitigations will be disabled. You really
+	  should know what you are doing to say so.
+
+if SPECULATION_MITIGATIONS
+
+config PAGE_TABLE_ISOLATION
+	bool "Remove the kernel mapping in user mode"
+	default y
+	depends on (X86_64 || X86_PAE)
+	help
+	  This feature reduces the number of hardware side channels by
+	  ensuring that the majority of kernel addresses are not mapped
+	  into userspace.
+
+	  See Documentation/x86/pti.rst for more details.
+
+config RETPOLINE
+	bool "Avoid speculative indirect branches in kernel"
+	select OBJTOOL if HAVE_OBJTOOL
+	default y
+	help
+	  Compile kernel with the retpoline compiler options to guard against
+	  kernel-to-user data leaks by avoiding speculative indirect
+	  branches. Requires a compiler with -mindirect-branch=thunk-extern
+	  support for full protection. The kernel may run slower.
+
+config RETHUNK
+	bool "Enable return-thunks"
+	depends on RETPOLINE && CC_HAS_RETURN_THUNK
+	select OBJTOOL if HAVE_OBJTOOL
+	default y
+	help
+	  Compile the kernel with the return-thunks compiler option to guard
+	  against kernel-to-user data leaks by avoiding return speculation.
+	  Requires a compiler with -mfunction-return=thunk-extern
+	  support for full protection. The kernel may run slower.
+
+config CPU_UNRET_ENTRY
+	bool "Enable UNRET on kernel entry"
+	depends on CPU_SUP_AMD && RETHUNK
+	default y
+	help
+	  Compile the kernel with support for the retbleed=unret mitigation.
+
+config CPU_IBPB_ENTRY
+	bool "Enable IBPB on kernel entry"
+	depends on CPU_SUP_AMD
+	default y
+	help
+	  Compile the kernel with support for the retbleed=ibpb mitigation.
+
+config CPU_IBRS_ENTRY
+	bool "Enable IBRS on kernel entry"
+	depends on CPU_SUP_INTEL
+	default y
+	help
+	  Compile the kernel with support for the spectre_v2=ibrs mitigation.
+	  This mitigates both spectre_v2 and retbleed at great cost to
+	  performance.
+
+config SLS
+	bool "Mitigate Straight-Line-Speculation"
+	depends on CC_HAS_SLS && X86_64
+	select OBJTOOL if HAVE_OBJTOOL
+	default n
+	help
+	  Compile the kernel with straight-line-speculation options to guard
+	  against straight line speculation. The kernel image might be slightly
+	  larger.
+
+endif
+
 config ARCH_HAS_ADD_PAGES
 	def_bool y
 	depends on ARCH_ENABLE_MEMORY_HOTPLUG

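The help texts above encode a dependency chain: CPU_UNRET_ENTRY needs RETHUNK, and RETHUNK needs both RETPOLINE and a compiler that accepts -mfunction-return=thunk-extern (probed into CC_HAS_RETURN_THUNK). Kconfig enforces that at configuration time; spelled out against the generated CONFIG_* defines it would look roughly like this illustrative fragment, which is not part of the patch:

/* Illustrative only: the dependency chain the new Kconfig symbols encode. */
#if defined(CONFIG_CPU_UNRET_ENTRY) && !defined(CONFIG_RETHUNK)
# error "CPU_UNRET_ENTRY depends on RETHUNK"
#endif

#if defined(CONFIG_RETHUNK) && \
	(!defined(CONFIG_RETPOLINE) || !defined(CONFIG_CC_HAS_RETURN_THUNK))
# error "RETHUNK depends on RETPOLINE && CC_HAS_RETURN_THUNK"
#endif

int main(void) { return 0; }	/* keeps the fragment a complete translation unit */
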
arch/x86/Makefile
@@ -15,14 +15,18 @@ endif
 ifdef CONFIG_CC_IS_GCC
 RETPOLINE_CFLAGS	:= $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
 RETPOLINE_CFLAGS	+= $(call cc-option,-mindirect-branch-cs-prefix)
-RETPOLINE_CFLAGS	+= $(call cc-option,-mfunction-return=thunk-extern)
 RETPOLINE_VDSO_CFLAGS	:= $(call cc-option,-mindirect-branch=thunk-inline -mindirect-branch-register)
 endif
 ifdef CONFIG_CC_IS_CLANG
 RETPOLINE_CFLAGS	:= -mretpoline-external-thunk
 RETPOLINE_VDSO_CFLAGS	:= -mretpoline
-RETPOLINE_CFLAGS	+= $(call cc-option,-mfunction-return=thunk-extern)
 endif
+
+ifdef CONFIG_RETHUNK
+RETHUNK_CFLAGS		:= -mfunction-return=thunk-extern
+RETPOLINE_CFLAGS	+= $(RETHUNK_CFLAGS)
+endif
+
 export RETPOLINE_CFLAGS
 export RETPOLINE_VDSO_CFLAGS
 

arch/x86/entry/calling.h
@@ -297,6 +297,7 @@ For 32-bit we have the following conventions - kernel is built with
  * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
  */
 .macro IBRS_ENTER save_reg
+#ifdef CONFIG_CPU_IBRS_ENTRY
 	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
 	movl	$MSR_IA32_SPEC_CTRL, %ecx
 
@@ -317,6 +318,7 @@ For 32-bit we have the following conventions - kernel is built with
 	shr	$32, %rdx
 	wrmsr
 .Lend_\@:
+#endif
 .endm
 
 /*
@@ -324,6 +326,7 @@ For 32-bit we have the following conventions - kernel is built with
  * regs. Must be called after the last RET.
  */
 .macro IBRS_EXIT save_reg
+#ifdef CONFIG_CPU_IBRS_ENTRY
 	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
 	movl	$MSR_IA32_SPEC_CTRL, %ecx
 
@@ -338,6 +341,7 @@ For 32-bit we have the following conventions - kernel is built with
 	shr	$32, %rdx
 	wrmsr
 .Lend_\@:
+#endif
 .endm
 
 /*

arch/x86/include/asm/disabled-features.h
@@ -54,9 +54,19 @@
 # define DISABLE_RETPOLINE	0
 #else
 # define DISABLE_RETPOLINE	((1 << (X86_FEATURE_RETPOLINE & 31)) | \
-				 (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)) | \
-				 (1 << (X86_FEATURE_RETHUNK & 31)) | \
-				 (1 << (X86_FEATURE_UNRET & 31)))
+				 (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)))
+#endif
+
+#ifdef CONFIG_RETHUNK
+# define DISABLE_RETHUNK	0
+#else
+# define DISABLE_RETHUNK	(1 << (X86_FEATURE_RETHUNK & 31))
+#endif
+
+#ifdef CONFIG_CPU_UNRET_ENTRY
+# define DISABLE_UNRET		0
+#else
+# define DISABLE_UNRET		(1 << (X86_FEATURE_UNRET & 31))
 #endif
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
@@ -91,7 +101,7 @@
 #define DISABLED_MASK8	(DISABLE_TDX_GUEST)
 #define DISABLED_MASK9	(DISABLE_SGX)
 #define DISABLED_MASK10	0
-#define DISABLED_MASK11	(DISABLE_RETPOLINE)
+#define DISABLED_MASK11	(DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET)
 #define DISABLED_MASK12	0
 #define DISABLED_MASK13	0
 #define DISABLED_MASK14	0

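DISABLED_MASK11 is what lets cpu_feature_enabled() fold a compiled-out feature to a constant false at build time. A userspace sketch of the word/bit arithmetic behind DISABLE_UNRET follows; the feature index is made up for the demo, the real values live in cpufeatures.h:

#include <stdio.h>

/* Hypothetical feature index: word * 32 + bit (not the real value). */
#define DEMO_FEATURE_UNRET	(11 * 32 + 15)

/* Mirrors DISABLE_UNRET: non-zero only when the knob is compiled out. */
#ifdef CONFIG_CPU_UNRET_ENTRY
# define DISABLE_UNRET	0
#else
# define DISABLE_UNRET	(1u << (DEMO_FEATURE_UNRET & 31))
#endif

#define DISABLED_MASK11	(DISABLE_UNRET)

static int build_disabled(unsigned int feature)
{
	/* The kernel checks one mask per 32-feature word; only word 11 here. */
	return feature / 32 == 11 && (DISABLED_MASK11 & (1u << (feature & 31)));
}

int main(void)
{
	printf("UNRET disabled at build time: %d\n",
	       build_disabled(DEMO_FEATURE_UNRET));
	return 0;
}

Flipping CONFIG_CPU_UNRET_ENTRY on the compiler command line flips the printed result, mirroring how the mask above reacts to the new Kconfig knob.
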
arch/x86/include/asm/linkage.h
@@ -19,7 +19,7 @@
 #define __ALIGN_STR	__stringify(__ALIGN)
 #endif
 
-#if defined(CONFIG_RETPOLINE) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
+#if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
 #define RET	jmp __x86_return_thunk
 #else /* CONFIG_RETPOLINE */
 #ifdef CONFIG_SLS
@@ -31,7 +31,7 @@
 
 #else /* __ASSEMBLY__ */
 
-#if defined(CONFIG_RETPOLINE) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
+#if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
 #define ASM_RET	"jmp __x86_return_thunk\n\t"
 #else /* CONFIG_RETPOLINE */
 #ifdef CONFIG_SLS

arch/x86/include/asm/nospec-branch.h
@@ -127,6 +127,12 @@
 .Lskip_rsb_\@:
 .endm
 
+#ifdef CONFIG_CPU_UNRET_ENTRY
+#define CALL_ZEN_UNTRAIN_RET	"call zen_untrain_ret"
+#else
+#define CALL_ZEN_UNTRAIN_RET	""
+#endif
+
 /*
  * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
  * return thunk isn't mapped into the userspace tables (then again, AMD
@@ -139,10 +145,10 @@
  * where we have a stack but before any RET instruction.
  */
 .macro UNTRAIN_RET
-#ifdef CONFIG_RETPOLINE
+#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY)
 	ANNOTATE_UNRET_END
 	ALTERNATIVE_2 "",						\
-	              "call zen_untrain_ret", X86_FEATURE_UNRET,	\
+	              CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,		\
 		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
 #endif
 .endm

arch/x86/include/asm/static_call.h
@@ -46,7 +46,7 @@
 #define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func)			\
 	__ARCH_DEFINE_STATIC_CALL_TRAMP(name, ".byte 0xe9; .long " #func " - (. + 4)")
 
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_RETHUNK
 #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)			\
 	__ARCH_DEFINE_STATIC_CALL_TRAMP(name, "jmp __x86_return_thunk")
 #else

arch/x86/kernel/alternative.c
@@ -508,6 +508,7 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
 	}
 }
 
+#ifdef CONFIG_RETHUNK
 /*
  * Rewrite the compiler generated return thunk tail-calls.
  *
@@ -569,6 +570,10 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
 		}
 	}
 }
+#else
+void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
+#endif /* CONFIG_RETHUNK */
+
 #else /* !CONFIG_RETPOLINE || !CONFIG_OBJTOOL */
 
 void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }

arch/x86/kernel/cpu/amd.c
@@ -864,6 +864,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
 
 void init_spectral_chicken(struct cpuinfo_x86 *c)
 {
+#ifdef CONFIG_CPU_UNRET_ENTRY
 	u64 value;
 
 	/*
@@ -880,6 +881,7 @@ void init_spectral_chicken(struct cpuinfo_x86 *c)
 			wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
 		}
 	}
+#endif
 }
 
 static void init_amd_zn(struct cpuinfo_x86 *c)

arch/x86/kernel/cpu/bugs.c
@@ -842,7 +842,6 @@ static int __init retbleed_parse_cmdline(char *str)
 early_param("retbleed", retbleed_parse_cmdline);
 
 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
-#define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler; falling back to IBPB!\n"
 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
 
 static void __init retbleed_select_mitigation(void)
@@ -857,18 +856,33 @@ static void __init retbleed_select_mitigation(void)
 		return;
 
 	case RETBLEED_CMD_UNRET:
-		retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+		if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
+			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+		} else {
+			pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n");
+			goto do_cmd_auto;
+		}
 		break;
 
 	case RETBLEED_CMD_IBPB:
-		retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+		if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
+			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+		} else {
+			pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
+			goto do_cmd_auto;
+		}
 		break;
 
+do_cmd_auto:
 	case RETBLEED_CMD_AUTO:
 	default:
 		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
-		    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
-			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+		    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+			if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
+				retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+			else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY))
+				retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+		}
 
 		/*
 		 * The Intel mitigation (IBRS or eIBRS) was already selected in
@@ -881,14 +895,6 @@ static void __init retbleed_select_mitigation(void)
 
 	switch (retbleed_mitigation) {
 	case RETBLEED_MITIGATION_UNRET:
-
-		if (!IS_ENABLED(CONFIG_RETPOLINE) ||
-		    !IS_ENABLED(CONFIG_CC_HAS_RETURN_THUNK)) {
-			pr_err(RETBLEED_COMPILER_MSG);
-			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
-			goto retbleed_force_ibpb;
-		}
-
 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
 		setup_force_cpu_cap(X86_FEATURE_UNRET);
 
@@ -900,7 +906,6 @@ static void __init retbleed_select_mitigation(void)
 		break;
 
 	case RETBLEED_MITIGATION_IBPB:
-retbleed_force_ibpb:
 		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
 		mitigate_smt = true;
 		break;
@@ -1271,6 +1276,12 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 		return SPECTRE_V2_CMD_AUTO;
 	}
 
+	if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) {
+		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
+		       mitigation_options[i].option);
+		return SPECTRE_V2_CMD_AUTO;
+	}
+
 	if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
 		pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
 		       mitigation_options[i].option);
@@ -1328,7 +1339,8 @@ static void __init spectre_v2_select_mitigation(void)
 			break;
 		}
 
-		if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+		if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) &&
+		    boot_cpu_has_bug(X86_BUG_RETBLEED) &&
 		    retbleed_cmd != RETBLEED_CMD_OFF &&
 		    boot_cpu_has(X86_FEATURE_IBRS) &&
 		    boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {

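The boot-time selection above now degrades gracefully instead of force-enabling IBPB behind the user's back: an explicitly requested but not-compiled-in mitigation falls back to the AUTO case through the do_cmd_auto label. A compressed userspace sketch of that control flow, not the kernel function; the HAVE_* constants stand in for IS_ENABLED() here:

#include <stdio.h>

#ifdef CONFIG_CPU_UNRET_ENTRY
# define HAVE_UNRET 1
#else
# define HAVE_UNRET 0
#endif
#ifdef CONFIG_CPU_IBPB_ENTRY
# define HAVE_IBPB 1
#else
# define HAVE_IBPB 0
#endif

enum cmd { CMD_OFF, CMD_UNRET, CMD_IBPB, CMD_AUTO };

static const char *select_mitigation(enum cmd cmd)
{
	const char *mit = "off";

	switch (cmd) {
	case CMD_OFF:
		return mit;

	case CMD_UNRET:
		if (HAVE_UNRET) {
			mit = "unret";
		} else {
			fprintf(stderr, "unret not compiled in\n");
			goto do_cmd_auto;
		}
		break;

	case CMD_IBPB:
		if (HAVE_IBPB) {
			mit = "ibpb";
		} else {
			fprintf(stderr, "ibpb not compiled in\n");
			goto do_cmd_auto;
		}
		break;

do_cmd_auto:	/* same trick as the patch: a label right before the AUTO case */
	case CMD_AUTO:
	default:
		if (HAVE_UNRET)
			mit = "unret";
		else if (HAVE_IBPB)
			mit = "ibpb";
		break;
	}

	return mit;
}

int main(void)
{
	printf("requested unret, selected: %s\n", select_mitigation(CMD_UNRET));
	return 0;
}

With neither knob defined at build time, both explicit requests end up at "off" after the warning, which is the behaviour the pr_err() plus goto rework is after.
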
arch/x86/kernel/static_call.c
@@ -126,7 +126,7 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
 }
 EXPORT_SYMBOL_GPL(arch_static_call_transform);
 
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_RETHUNK
 /*
  * This is called by apply_returns() to fix up static call trampolines,
  * specifically ARCH_DEFINE_STATIC_CALL_NULL_TRAMP which is recorded as

arch/x86/kvm/emulate.c
@@ -439,10 +439,10 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
  *
  * ENDBR			[4 bytes; CONFIG_X86_KERNEL_IBT]
  * SETcc %al			[3 bytes]
- * RET | JMP __x86_return_thunk	[1,5 bytes; CONFIG_RETPOLINE]
+ * RET | JMP __x86_return_thunk	[1,5 bytes; CONFIG_RETHUNK]
  * INT3				[1 byte; CONFIG_SLS]
  */
-#define RET_LENGTH	(1 + (4 * IS_ENABLED(CONFIG_RETPOLINE)) + \
+#define RET_LENGTH	(1 + (4 * IS_ENABLED(CONFIG_RETHUNK)) + \
 			 IS_ENABLED(CONFIG_SLS))
 #define SETCC_LENGTH	(ENDBR_INSN_SIZE + 3 + RET_LENGTH)
 #define SETCC_ALIGN	(4 << ((SETCC_LENGTH > 4) & 1) << ((SETCC_LENGTH > 8) & 1))

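The stub-size arithmetic in the comment above is easy to sanity-check in isolation. A throwaway userspace version of the same formulas, with the config options passed as plain integers (setcc_align() is just a demo helper):

#include <stdio.h>

static unsigned int setcc_align(int ibt, int rethunk, int sls)
{
	unsigned int endbr = ibt ? 4 : 0;
	unsigned int ret_length = 1 + 4 * rethunk + sls;
	unsigned int setcc_length = endbr + 3 + ret_length;

	/* 4 << ((len > 4) & 1) << ((len > 8) & 1): rounds to 4, 8 or 16 */
	return 4u << ((setcc_length > 4) & 1) << ((setcc_length > 8) & 1);
}

int main(void)
{
	printf("plain RET:          align %u\n", setcc_align(0, 0, 0));
	printf("return thunk:       align %u\n", setcc_align(0, 1, 0));
	printf("IBT + thunk + SLS:  align %u\n", setcc_align(1, 1, 1));
	return 0;
}

A bare RET keeps the stub at 4 bytes, switching the tail to JMP __x86_return_thunk pushes it to 8, and adding ENDBR plus the trailing INT3 rounds it up to 16, which is exactly what SETCC_ALIGN computes.
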
arch/x86/lib/retpoline.S
@@ -72,6 +72,8 @@ SYM_CODE_END(__x86_indirect_thunk_array)
  * This function name is magical and is used by -mfunction-return=thunk-extern
  * for the compiler to generate JMPs to it.
  */
+#ifdef CONFIG_RETHUNK
+
 	.section .text.__x86.return_thunk
 
 /*
@@ -136,3 +138,5 @@ SYM_FUNC_END(zen_untrain_ret)
 __EXPORT_THUNK(zen_untrain_ret)
 
 EXPORT_SYMBOL(__x86_return_thunk)
+
+#endif /* CONFIG_RETHUNK */

scripts/Makefile.lib
@@ -236,6 +236,7 @@ objtool_args =								\
 	$(if $(CONFIG_FTRACE_MCOUNT_USE_OBJTOOL), --mcount)		\
 	$(if $(CONFIG_UNWINDER_ORC), --orc)				\
 	$(if $(CONFIG_RETPOLINE), --retpoline)				\
+	$(if $(CONFIG_RETHUNK), --rethunk)				\
 	$(if $(CONFIG_SLS), --sls)					\
 	$(if $(CONFIG_STACK_VALIDATION), --stackval)			\
 	$(if $(CONFIG_HAVE_STATIC_CALL_INLINE), --static-call)		\

scripts/Makefile.vmlinux_o
@@ -44,7 +44,7 @@ objtool-enabled := $(or $(delay-objtool),$(CONFIG_NOINSTR_VALIDATION))
 
 objtool_args := \
 	$(if $(delay-objtool),$(objtool_args)) \
-	$(if $(CONFIG_NOINSTR_VALIDATION), --noinstr $(if $(CONFIG_RETPOLINE), --unret)) \
+	$(if $(CONFIG_NOINSTR_VALIDATION), --noinstr $(if $(CONFIG_CPU_UNRET_ENTRY), --unret)) \
 	$(if $(CONFIG_GCOV_KERNEL), --no-unreachable) \
 	--link
 

security/Kconfig
@@ -54,17 +54,6 @@ config SECURITY_NETWORK
 	  implement socket and networking access controls.
 	  If you are unsure how to answer this question, answer N.
 
-config PAGE_TABLE_ISOLATION
-	bool "Remove the kernel mapping in user mode"
-	default y
-	depends on (X86_64 || X86_PAE) && !UML
-	help
-	  This feature reduces the number of hardware side channels by
-	  ensuring that the majority of kernel addresses are not mapped
-	  into userspace.
-
-	  See Documentation/x86/pti.rst for more details.
-
 config SECURITY_INFINIBAND
 	bool "Infiniband Security Hooks"
 	depends on SECURITY && INFINIBAND

tools/objtool/builtin-check.c
@@ -68,6 +68,7 @@ const struct option check_options[] = {
 	OPT_BOOLEAN('n', "noinstr", &opts.noinstr, "validate noinstr rules"),
 	OPT_BOOLEAN('o', "orc", &opts.orc, "generate ORC metadata"),
 	OPT_BOOLEAN('r', "retpoline", &opts.retpoline, "validate and annotate retpoline usage"),
+	OPT_BOOLEAN(0,   "rethunk", &opts.rethunk, "validate and annotate rethunk usage"),
 	OPT_BOOLEAN(0,   "unret", &opts.unret, "validate entry unret placement"),
 	OPT_BOOLEAN('l', "sls", &opts.sls, "validate straight-line-speculation mitigations"),
 	OPT_BOOLEAN('s', "stackval", &opts.stackval, "validate frame pointer rules"),
@@ -124,6 +125,7 @@ static bool opts_valid(void)
 	    opts.noinstr		||
 	    opts.orc			||
 	    opts.retpoline		||
+	    opts.rethunk		||
 	    opts.sls			||
 	    opts.stackval		||
 	    opts.static_call		||
@@ -136,6 +138,11 @@
 		return true;
 	}
 
+	if (opts.unret && !opts.rethunk) {
+		ERROR("--unret requires --rethunk");
+		return false;
+	}
+
 	if (opts.dump_orc)
 		return true;
 

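The new --rethunk option and the "--unret requires --rethunk" test in opts_valid() follow a standard pattern: boolean long options plus a dependency check after parsing. A self-contained sketch of the same pattern with getopt_long; objtool itself uses the OPT_BOOLEAN() parse-options helpers shown above, not getopt:

#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>

static struct {
	int rethunk;
	int unret;
} opts;

int main(int argc, char **argv)
{
	static const struct option longopts[] = {
		{ "rethunk", no_argument, &opts.rethunk, 1 },
		{ "unret",   no_argument, &opts.unret,   1 },
		{ 0 }
	};

	/* Flag-style options: getopt_long() sets the int directly. */
	while (getopt_long(argc, argv, "", longopts, NULL) != -1)
		;

	/* Same dependency rule as the hunk above. */
	if (opts.unret && !opts.rethunk) {
		fprintf(stderr, "--unret requires --rethunk\n");
		return EXIT_FAILURE;
	}

	printf("rethunk=%d unret=%d\n", opts.rethunk, opts.unret);
	return EXIT_SUCCESS;
}

Running it as "./a.out --unret" fails with the dependency error, while "./a.out --rethunk --unret" succeeds.
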
tools/objtool/check.c
@@ -3732,8 +3732,11 @@ static int validate_retpoline(struct objtool_file *file)
 			continue;
 
 		if (insn->type == INSN_RETURN) {
-			WARN_FUNC("'naked' return found in RETPOLINE build",
-				  insn->sec, insn->offset);
+			if (opts.rethunk) {
+				WARN_FUNC("'naked' return found in RETHUNK build",
+					  insn->sec, insn->offset);
+			} else
+				continue;
 		} else {
 			WARN_FUNC("indirect %s found in RETPOLINE build",
 				  insn->sec, insn->offset,
@@ -4264,7 +4267,9 @@ int check(struct objtool_file *file)
 		if (ret < 0)
 			goto out;
 		warnings += ret;
+	}
 
+	if (opts.rethunk) {
 		ret = create_return_sites_sections(file);
 		if (ret < 0)
 			goto out;

tools/objtool/include/objtool/builtin.h
@@ -19,6 +19,7 @@ struct opts {
 	bool noinstr;
 	bool orc;
 	bool retpoline;
+	bool rethunk;
 	bool unret;
 	bool sls;
 	bool stackval;