	arm64: KVM: Propagate full Spectre v2 workaround state to KVM guests
Recent commits added the explicit notion of "workaround not required" to
the state of the Spectre v2 (aka. BP_HARDENING) workaround, where we just
had "needed" and "unknown" before.

Export this knowledge to the rest of the kernel and enhance the existing
kvm_arm_harden_branch_predictor() to report this new state as well.

Export this new state to guests when they use KVM's firmware interface
emulation.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
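For context only (not part of the commit): a minimal sketch of how a guest kernel running on such a KVM host could probe this state through the SMCCC 1.1 firmware interface that KVM emulates. The probe_bp_hardening_fw() helper and its return convention are made up for illustration; ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_ARCH_WORKAROUND_1 and the SMCCC_RET_* codes are the real <linux/arm-smccc.h> definitions that the psci.c hunk below hands back to the guest.

#include <linux/arm-smccc.h>

/*
 * Illustrative sketch: ask the hypervisor (via the HVC conduit that KVM
 * guests use, assuming an SMCCC v1.1 conduit has already been detected)
 * whether the Spectre v2 / ARCH_WORKAROUND_1 firmware call is implemented,
 * and whether it is needed at all on this CPU.
 */
static int probe_bp_hardening_fw(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			  ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	switch ((int)res.a0) {
	case SMCCC_RET_SUCCESS:		/* workaround present: guest should invoke it */
		return 1;
	case SMCCC_RET_NOT_REQUIRED:	/* new state: CPU not affected, nothing to do */
		return 0;
	default:			/* SMCCC_RET_NOT_SUPPORTED: state unknown */
		return -1;
	}
}

With this commit the SMCCC_RET_NOT_REQUIRED branch can actually be taken by a guest whose vCPUs are known not to need the workaround; previously KVM could only answer "supported" or "not supported".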
parent 80f393a23b
commit c118bbb527

 5 changed files with 56 additions and 11 deletions
			
@@ -362,7 +362,11 @@ static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_vhe_guest_enter(void) {}
 static inline void kvm_arm_vhe_guest_exit(void) {}
 
-static inline bool kvm_arm_harden_branch_predictor(void)
+#define KVM_BP_HARDEN_UNKNOWN		-1
+#define KVM_BP_HARDEN_WA_NEEDED		0
+#define KVM_BP_HARDEN_NOT_REQUIRED	1
+
+static inline int kvm_arm_harden_branch_predictor(void)
 {
 	switch(read_cpuid_part()) {
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
@@ -370,10 +374,12 @@ static inline bool kvm_arm_harden_branch_predictor(void)
 	case ARM_CPU_PART_CORTEX_A12:
 	case ARM_CPU_PART_CORTEX_A15:
 	case ARM_CPU_PART_CORTEX_A17:
-		return true;
+		return KVM_BP_HARDEN_WA_NEEDED;
 #endif
+	case ARM_CPU_PART_CORTEX_A7:
+		return KVM_BP_HARDEN_NOT_REQUIRED;
 	default:
-		return false;
+		return KVM_BP_HARDEN_UNKNOWN;
 	}
 }
 
@@ -614,6 +614,12 @@ static inline bool system_uses_irq_prio_masking(void)
 	       cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
 }
 
+#define ARM64_BP_HARDEN_UNKNOWN		-1
+#define ARM64_BP_HARDEN_WA_NEEDED	0
+#define ARM64_BP_HARDEN_NOT_REQUIRED	1
+
+int get_spectre_v2_workaround_state(void);
+
 #define ARM64_SSBD_UNKNOWN		-1
 #define ARM64_SSBD_FORCE_DISABLE	0
 #define ARM64_SSBD_KERNEL		1
@@ -620,9 +620,21 @@ static inline void kvm_arm_vhe_guest_exit(void)
 	isb();
 }
 
-static inline bool kvm_arm_harden_branch_predictor(void)
+#define KVM_BP_HARDEN_UNKNOWN		-1
+#define KVM_BP_HARDEN_WA_NEEDED		0
+#define KVM_BP_HARDEN_NOT_REQUIRED	1
+
+static inline int kvm_arm_harden_branch_predictor(void)
 {
-	return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
+	switch (get_spectre_v2_workaround_state()) {
+	case ARM64_BP_HARDEN_WA_NEEDED:
+		return KVM_BP_HARDEN_WA_NEEDED;
+	case ARM64_BP_HARDEN_NOT_REQUIRED:
+		return KVM_BP_HARDEN_NOT_REQUIRED;
+	case ARM64_BP_HARDEN_UNKNOWN:
+	default:
+		return KVM_BP_HARDEN_UNKNOWN;
+	}
 }
 
 #define KVM_SSBD_UNKNOWN		-1
@@ -554,6 +554,17 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
 static bool __hardenbp_enab = true;
 static bool __spectrev2_safe = true;
 
+int get_spectre_v2_workaround_state(void)
+{
+	if (__spectrev2_safe)
+		return ARM64_BP_HARDEN_NOT_REQUIRED;
+
+	if (!__hardenbp_enab)
+		return ARM64_BP_HARDEN_UNKNOWN;
+
+	return ARM64_BP_HARDEN_WA_NEEDED;
+}
+
 /*
  * List of CPUs that do not need any Spectre-v2 mitigation at all.
  */
@@ -854,13 +865,15 @@ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
 		char *buf)
 {
-	if (__spectrev2_safe)
+	switch (get_spectre_v2_workaround_state()) {
+	case ARM64_BP_HARDEN_NOT_REQUIRED:
 		return sprintf(buf, "Not affected\n");
-
-	if (__hardenbp_enab)
+	case ARM64_BP_HARDEN_WA_NEEDED:
 		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
-
-	return sprintf(buf, "Vulnerable\n");
+	case ARM64_BP_HARDEN_UNKNOWN:
+	default:
+		return sprintf(buf, "Vulnerable\n");
+	}
 }
 
 ssize_t cpu_show_spec_store_bypass(struct device *dev,
@@ -401,8 +401,16 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
 		feature = smccc_get_arg1(vcpu);
 		switch(feature) {
 		case ARM_SMCCC_ARCH_WORKAROUND_1:
-			if (kvm_arm_harden_branch_predictor())
+			switch (kvm_arm_harden_branch_predictor()) {
+			case KVM_BP_HARDEN_UNKNOWN:
+				break;
+			case KVM_BP_HARDEN_WA_NEEDED:
 				val = SMCCC_RET_SUCCESS;
+				break;
+			case KVM_BP_HARDEN_NOT_REQUIRED:
+				val = SMCCC_RET_NOT_REQUIRED;
+				break;
+			}
 			break;
 		case ARM_SMCCC_ARCH_WORKAROUND_2:
 			switch (kvm_arm_have_ssbd()) {
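One more illustrative note (again not part of the commit): on the host side, get_spectre_v2_workaround_state() now feeds the same three-way state into the existing sysfs vulnerability report shown in the cpu_errata.c hunk above, so userspace can read it from the standard vulnerabilities file. A throwaway reader, purely as an example:

#include <stdio.h>

/* Print the host's Spectre v2 status as rendered by cpu_show_spectre_v2(). */
int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

	if (!f) {
		perror("spectre_v2");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* "Not affected", "Mitigation: ..." or "Vulnerable" */
	fclose(f);
	return 0;
}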