x86/speculation: Fill RSB on vmexit for IBRS

Prevent RSB underflow/poisoning attacks with RSB. While at it, add a
bunch of comments to attempt to document the current state of tribal
knowledge about RSB attacks and what exactly is being mitigated.

Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
parent bea7e31a5c
commit 9756bba284

3 changed files with 62 additions and 9 deletions
arch/x86/include/asm/cpufeatures.h
@@ -204,7 +204,7 @@
 #define X86_FEATURE_XCOMPACTED		( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */
 #define X86_FEATURE_PTI			( 7*32+11) /* Kernel Page Table Isolation enabled */
 #define X86_FEATURE_KERNEL_IBRS		( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
-/* FREE!				( 7*32+13) */
+#define X86_FEATURE_RSB_VMEXIT		( 7*32+13) /* "" Fill RSB on VM-Exit */
 #define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2		( 7*32+15) /* Code and Data Prioritization L2 */
 #define X86_FEATURE_MSR_SPEC_CTRL	( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
arch/x86/kernel/cpu/bugs.c
@@ -1401,16 +1401,69 @@ static void __init spectre_v2_select_mitigation(void)
 	pr_info("%s\n", spectre_v2_strings[mode]);
 
 	/*
-	 * If spectre v2 protection has been enabled, unconditionally fill
-	 * RSB during a context switch; this protects against two independent
-	 * issues:
+	 * If Spectre v2 protection has been enabled, fill the RSB during a
+	 * context switch.  In general there are two types of RSB attacks
+	 * across context switches, for which the CALLs/RETs may be unbalanced.
 	 *
-	 *	- RSB underflow (and switch to BTB) on Skylake+
-	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
+	 * 1) RSB underflow
+	 *
+	 *    Some Intel parts have "bottomless RSB".  When the RSB is empty,
+	 *    speculated return targets may come from the branch predictor,
+	 *    which could have a user-poisoned BTB or BHB entry.
+	 *
+	 *    AMD has it even worse: *all* returns are speculated from the BTB,
+	 *    regardless of the state of the RSB.
+	 *
+	 *    When IBRS or eIBRS is enabled, the "user -> kernel" attack
+	 *    scenario is mitigated by the IBRS branch prediction isolation
+	 *    properties, so the RSB buffer filling wouldn't be necessary to
+	 *    protect against this type of attack.
+	 *
+	 *    The "user -> user" attack scenario is mitigated by RSB filling.
+	 *
+	 * 2) Poisoned RSB entry
+	 *
+	 *    If the 'next' in-kernel return stack is shorter than 'prev',
+	 *    'next' could be tricked into speculating with a user-poisoned RSB
+	 *    entry.
+	 *
+	 *    The "user -> kernel" attack scenario is mitigated by SMEP and
+	 *    eIBRS.
+	 *
+	 *    The "user -> user" scenario, also known as SpectreBHB, requires
+	 *    RSB clearing.
+	 *
+	 * So to mitigate all cases, unconditionally fill RSB on context
+	 * switches.
+	 *
+	 * FIXME: Is this pointless for retbleed-affected AMD?
 	 */
 	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
 	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
 
+	/*
+	 * Similar to context switches, there are two types of RSB attacks
+	 * after vmexit:
+	 *
+	 * 1) RSB underflow
+	 *
+	 * 2) Poisoned RSB entry
+	 *
+	 * When retpoline is enabled, both are mitigated by filling/clearing
+	 * the RSB.
+	 *
+	 * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
+	 * prediction isolation protections, RSB still needs to be cleared
+	 * because of #2.  Note that SMEP provides no protection here, unlike
+	 * user-space-poisoned RSB entries.
+	 *
+	 * eIBRS, on the other hand, has RSB-poisoning protections, so it
+	 * doesn't need RSB clearing after vmexit.
+	 */
+	if (boot_cpu_has(X86_FEATURE_RETPOLINE) ||
+	    boot_cpu_has(X86_FEATURE_KERNEL_IBRS))
+		setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+
 	/*
 	 * Retpoline protects the kernel, but doesn't protect firmware.  IBRS
 	 * and Enhanced IBRS protect firmware too, so enable IBRS around
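For illustration: "filling" (stuffing) the RSB means executing a run of CALLs
whose return addresses all point at a harmless speculation trap, so every RSB
slot ends up holding a benign target. Below is a minimal standalone sketch of
that idea in x86-64 assembly; it is not the kernel's FILL_RETURN_BUFFER macro
(which adds alternatives patching, objtool annotations, and unrolled call
pairs), and the 16-entry count and label names are illustrative.

	/* Sketch only: stuff the RSB with benign entries */
	mov	$16, %ecx		/* assumed RSB depth to overwrite */
.Lstuff:
	call	.Lnext			/* each CALL pushes one RSB entry -> .Ltrap */
.Ltrap:
	pause				/* speculation trap: reached only by a */
	lfence				/* mispredicted RET consuming our entry */
	jmp	.Ltrap
.Lnext:
	dec	%ecx
	jnz	.Lstuff
	add	$(16*8), %rsp		/* drop the 16 architectural return addresses */

A RET that later mispredicts through one of these stuffed entries lands in the
pause/lfence loop instead of an attacker-chosen gadget.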
arch/x86/kvm/vmx/vmenter.S
@@ -194,15 +194,15 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
 	 * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
 	 * the first unbalanced RET after vmexit!
 	 *
-	 * For retpoline, RSB filling is needed to prevent poisoned RSB entries
-	 * and (in some cases) RSB underflow.
+	 * For retpoline or IBRS, RSB filling is needed to prevent poisoned RSB
+	 * entries and (in some cases) RSB underflow.
 	 *
 	 * eIBRS has its own protection against poisoned RSB, so it doesn't
 	 * need the RSB filling sequence.  But it does need to be enabled
 	 * before the first unbalanced RET.
 	 */
 
-	FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+	FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
 
 	pop %_ASM_ARG2	/* @flags */
 	pop %_ASM_ARG1	/* @vmx */
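The vmenter.S change can be a one-liner because FILL_RETURN_BUFFER is already
feature-gated through the alternatives mechanism: keying it on the new
X86_FEATURE_RSB_VMEXIT (set for retpoline or kernel IBRS in the bugs.c hunk
above) rather than X86_FEATURE_RETPOLINE is what turns the stuffing on for
plain-IBRS kernels too. A rough sketch of that gating, under the assumption
that the macro wraps the stuffing sequence in an ALTERNATIVE (label names
illustrative, not the verbatim kernel macro):

	/* Assumed shape of the feature gate, for illustration only */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
	/* Patched to NOPs when \ftr is set, so the stuffing runs;
	 * left as a jump (skipping the sequence) when \ftr is clear. */
	ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
	/* ... RSB stuffing sequence (see sketch above) ... */
.Lskip_rsb_\@:
.endm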