forked from mirrors/linux

x86/kvm/mmu: check if tdp/shadow MMU reconfiguration is needed

MMU reconfiguration in init_kvm_tdp_mmu()/kvm_init_shadow_mmu() can be
avoided if the source data used to configure it didn't change; enhance
MMU extended role with the required fields and consolidate common code
in kvm_calc_mmu_role_common().

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a336282d77
commit 7dcd575520

2 changed files with 61 additions and 32 deletions
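The change follows one pattern in both the TDP and the shadow paths: derive a single 64-bit "role" value from every piece of vCPU state that influences MMU setup, compare it against the role cached in the MMU context, and skip reconfiguration when nothing relevant changed. A minimal, self-contained sketch of that pattern (hypothetical types and field names, not the kernel's own) looks roughly like this:

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in for kvm_mmu_role: two 32-bit halves viewed as one u64. */
    union mmu_role {
            uint64_t as_u64;
            struct {
                    uint32_t base;  /* bits that identify shadow pages */
                    uint32_t ext;   /* extra state that also affects setup */
            };
    };

    struct mmu_ctx {
            union mmu_role role;    /* what the MMU is currently configured for */
    };

    /* Recompute the role from (stubbed-out) guest state. */
    static union mmu_role calc_role(bool paging, bool smep, bool la57)
    {
            union mmu_role role = { .as_u64 = 0 };

            role.base = paging ? 1u : 0u;
            role.ext = (smep ? 1u : 0u) | (la57 ? 2u : 0u);
            return role;
    }

    static void init_mmu(struct mmu_ctx *ctx, bool paging, bool smep, bool la57)
    {
            union mmu_role new_role = calc_role(paging, smep, la57);

            if (new_role.as_u64 == ctx->role.as_u64)
                    return; /* nothing relevant changed, keep the current setup */

            ctx->role = new_role;
            /* ...expensive reinitialisation of callbacks, levels, masks... */
    }

In the patch below, kvm_calc_mmu_role_common() plays the part of calc_role(), and init_kvm_tdp_mmu()/kvm_init_shadow_mmu() perform the compare-and-return.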
arch/x86/include/asm/kvm_host.h
@@ -293,10 +293,12 @@ union kvm_mmu_extended_role {
 	struct {
 		unsigned int valid:1;
 		unsigned int execonly:1;
+		unsigned int cr0_pg:1;
 		unsigned int cr4_pse:1;
 		unsigned int cr4_pke:1;
 		unsigned int cr4_smap:1;
 		unsigned int cr4_smep:1;
+		unsigned int cr4_la57:1;
 	};
 };
 
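The two new extended-role bits cover state that influences how the MMU must be set up but is not part of the base page role: CR0.PG and CR4.LA57. The parent commit a336282d77 introduced a wrapper union that places the base and extended roles side by side, so the whole configuration can be compared with a single 64-bit load; its layout is roughly the following (shown here for orientation, not verbatim from this diff):

    union kvm_mmu_role {
            u64 as_u64;
            struct {
                    union kvm_mmu_page_role base;           /* identifies shadow pages */
                    union kvm_mmu_extended_role ext;        /* extra, non-identifying bits */
            };
    };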
arch/x86/kvm/mmu.c
@@ -4728,27 +4728,46 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
 {
 	union kvm_mmu_extended_role ext = {0};
 
+	ext.cr0_pg = !!is_paging(vcpu);
 	ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
 	ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
 	ext.cr4_pse = !!is_pse(vcpu);
 	ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
+	ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
 
 	ext.valid = 1;
 
 	return ext;
 }
 
-static union kvm_mmu_page_role
-kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu)
+static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
+						   bool base_only)
 {
-	union kvm_mmu_page_role role = {0};
+	union kvm_mmu_role role = {0};
 
-	role.guest_mode = is_guest_mode(vcpu);
-	role.smm = is_smm(vcpu);
-	role.ad_disabled = (shadow_accessed_mask == 0);
-	role.level = kvm_x86_ops->get_tdp_level(vcpu);
-	role.direct = true;
-	role.access = ACC_ALL;
+	role.base.access = ACC_ALL;
+	role.base.nxe = !!is_nx(vcpu);
+	role.base.cr4_pae = !!is_pae(vcpu);
+	role.base.cr0_wp = is_write_protection(vcpu);
+	role.base.smm = is_smm(vcpu);
+	role.base.guest_mode = is_guest_mode(vcpu);
+
+	if (base_only)
+		return role;
+
+	role.ext = kvm_calc_mmu_role_ext(vcpu);
+
+	return role;
+}
+
+static union kvm_mmu_role
+kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+{
+	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
+
+	role.base.ad_disabled = (shadow_accessed_mask == 0);
+	role.base.level = kvm_x86_ops->get_tdp_level(vcpu);
+	role.base.direct = true;
 
 	return role;
 }
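kvm_calc_mmu_role_common() now gathers the base-role bits shared by the TDP and shadow paths; the base_only flag lets callers that only need the page role used to identify shadow pages skip the kvm_calc_mmu_role_ext() work. Condensed from the hunks that follow, the two call patterns are:

    /* Reconfiguration paths need the full role, extended bits included. */
    union kvm_mmu_role new_role = kvm_calc_tdp_mmu_root_page_role(vcpu, false);

    /* kvm_mmu_calc_root_page_role() only needs the base page role. */
    union kvm_mmu_role base_role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);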
@@ -4756,9 +4775,14 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu)
 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *context = vcpu->arch.mmu;
+	union kvm_mmu_role new_role =
+		kvm_calc_tdp_mmu_root_page_role(vcpu, false);
 
-	context->mmu_role.base.word = mmu_base_role_mask.word &
-				  kvm_calc_tdp_mmu_root_page_role(vcpu).word;
+	new_role.base.word &= mmu_base_role_mask.word;
+	if (new_role.as_u64 == context->mmu_role.as_u64)
+		return;
+
+	context->mmu_role.as_u64 = new_role.as_u64;
 	context->page_fault = tdp_page_fault;
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = nonpaging_invlpg;
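The early return above is only sound because both sides of the comparison are normalised the same way: new_role.base.word is masked with mmu_base_role_mask before the compare, and the value stored into context->mmu_role (here and in kvm_init_shadow_mmu() below) is that same masked value, so bits outside the mask can never produce a spurious mismatch. Condensed from the hunk above, the check is:

    new_role.base.word &= mmu_base_role_mask.word;      /* same mask as the cached value */
    if (new_role.as_u64 == context->mmu_role.as_u64)
            return;                                     /* already configured for this role */
    context->mmu_role.as_u64 = new_role.as_u64;         /* remember what was configured */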
@@ -4798,29 +4822,23 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	reset_tdp_shadow_zero_bits_mask(vcpu, context);
 }
 
-static union kvm_mmu_page_role
-kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu)
+static union kvm_mmu_role
+kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
 {
-	union kvm_mmu_page_role role = {0};
-	bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
-	bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
+	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
 
-	role.nxe = is_nx(vcpu);
-	role.cr4_pae = !!is_pae(vcpu);
-	role.cr0_wp  = is_write_protection(vcpu);
-	role.smep_andnot_wp = smep && !is_write_protection(vcpu);
-	role.smap_andnot_wp = smap && !is_write_protection(vcpu);
-	role.guest_mode = is_guest_mode(vcpu);
-	role.smm = is_smm(vcpu);
-	role.direct = !is_paging(vcpu);
-	role.access = ACC_ALL;
+	role.base.smep_andnot_wp = role.ext.cr4_smep &&
+		!is_write_protection(vcpu);
+	role.base.smap_andnot_wp = role.ext.cr4_smap &&
+		!is_write_protection(vcpu);
+	role.base.direct = !is_paging(vcpu);
 
 	if (!is_long_mode(vcpu))
-		role.level = PT32E_ROOT_LEVEL;
+		role.base.level = PT32E_ROOT_LEVEL;
 	else if (is_la57_mode(vcpu))
-		role.level = PT64_ROOT_5LEVEL;
+		role.base.level = PT64_ROOT_5LEVEL;
 	else
-		role.level = PT64_ROOT_4LEVEL;
+		role.base.level = PT64_ROOT_4LEVEL;
 
 	return role;
 }
@@ -4828,6 +4846,12 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu)
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *context = vcpu->arch.mmu;
+	union kvm_mmu_role new_role =
+		kvm_calc_shadow_mmu_root_page_role(vcpu, false);
+
+	new_role.base.word &= mmu_base_role_mask.word;
+	if (new_role.as_u64 == context->mmu_role.as_u64)
+		return;
 
 	if (!is_paging(vcpu))
 		nonpaging_init_context(vcpu, context);
@@ -4838,8 +4862,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 	else
 		paging32_init_context(vcpu, context);
 
-	context->mmu_role.base.word = mmu_base_role_mask.word &
-				  kvm_calc_shadow_mmu_root_page_role(vcpu).word;
+	context->mmu_role.as_u64 = new_role.as_u64;
 	reset_shadow_zero_bits_mask(vcpu, context);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
@@ -4977,10 +5000,14 @@ EXPORT_SYMBOL_GPL(kvm_init_mmu);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
 {
+	union kvm_mmu_role role;
+
 	if (tdp_enabled)
-		return kvm_calc_tdp_mmu_root_page_role(vcpu);
+		role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);
 	else
-		return kvm_calc_shadow_mmu_root_page_role(vcpu);
+		role = kvm_calc_shadow_mmu_root_page_role(vcpu, true);
+
+	return role.base;
 }
 
 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)