mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	kvm: x86: add tsc_offset field to struct kvm_vcpu_arch
A future commit will want to easily read a vCPU's TSC offset, so we store it in struct kvm_vcpu_arch for easy access. Signed-off-by: Luiz Capitulino <lcapitulino@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
		
							parent
							
								
									ad53e35ae5
								
							
						
					
					
						commit
						a545ab6a00
					
				
					 2 changed files with 9 additions and 2 deletions
				
			
		| 
						 | 
				
			
			@ -568,6 +568,7 @@ struct kvm_vcpu_arch {
 | 
			
		|||
		struct kvm_steal_time steal;
 | 
			
		||||
	} st;
 | 
			
		||||
 | 
			
		||||
	u64 tsc_offset;
 | 
			
		||||
	u64 last_guest_tsc;
 | 
			
		||||
	u64 last_host_tsc;
 | 
			
		||||
	u64 tsc_offset_adjustment;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1413,6 +1413,12 @@ u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 | 
			
		|||
}
 | 
			
		||||
EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
 | 
			
		||||
 | 
			
		||||
/*
 * Set @vcpu's TSC offset: propagate the new value to the vendor
 * implementation via the kvm_x86_ops->write_tsc_offset() hook, then
 * cache it in vcpu->arch.tsc_offset so it can be read back cheaply
 * (this commit introduces the cached field for exactly that purpose).
 *
 * NOTE(review): the hardware/vendor hook is updated before the cached
 * copy — presumably deliberate ordering; confirm against any concurrent
 * readers of vcpu->arch.tsc_offset before reordering.
 */
static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	kvm_x86_ops->write_tsc_offset(vcpu, offset);	/* vendor-specific hook */
	vcpu->arch.tsc_offset = offset;			/* cached copy for fast access */
}
 | 
			
		||||
 | 
			
		||||
void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 | 
			
		||||
{
 | 
			
		||||
	struct kvm *kvm = vcpu->kvm;
 | 
			
		||||
| 
						 | 
				
			
			@ -1522,7 +1528,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 | 
			
		|||
 | 
			
		||||
	if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
 | 
			
		||||
		update_ia32_tsc_adjust_msr(vcpu, offset);
 | 
			
		||||
	kvm_x86_ops->write_tsc_offset(vcpu, offset);
 | 
			
		||||
	kvm_vcpu_write_tsc_offset(vcpu, offset);
 | 
			
		||||
	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 | 
			
		||||
 | 
			
		||||
	spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
 | 
			
		||||
| 
						 | 
				
			
			@ -2750,7 +2756,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 | 
			
		|||
		if (check_tsc_unstable()) {
 | 
			
		||||
			u64 offset = kvm_compute_tsc_offset(vcpu,
 | 
			
		||||
						vcpu->arch.last_guest_tsc);
 | 
			
		||||
			kvm_x86_ops->write_tsc_offset(vcpu, offset);
 | 
			
		||||
			kvm_vcpu_write_tsc_offset(vcpu, offset);
 | 
			
		||||
			vcpu->arch.tsc_catchup = 1;
 | 
			
		||||
		}
 | 
			
		||||
		/*
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue