mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 02:30:34 +02:00 
			
		
		
		
	arm64: KVM: Enable !VHE support for :G/:H perf event modifiers
Enable/disable event counters as appropriate when entering and exiting the guest to enable support for guest or host only event counting. For both VHE and non-VHE we switch the counters between host/guest at EL2. The PMU may be on when we change which counters are enabled; however, we avoid adding an isb, as we instead rely on existing context synchronisation events: the eret to enter the guest (__guest_enter) and eret in kvm_call_hyp for __kvm_vcpu_run_nvhe on returning. Signed-off-by: Andrew Murray <andrew.murray@arm.com> Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
This commit is contained in:
		
							parent
							
								
									d1947bc4bc
								
							
						
					
					
						commit
						3d91befbb3
					
				
					 3 changed files with 48 additions and 0 deletions
				
			
		| 
						 | 
				
			
			@ -591,6 +591,9 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 | 
			
		|||
 | 
			
		||||
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
 | 
			
		||||
void kvm_clr_pmu_events(u32 clr);
 | 
			
		||||
 | 
			
		||||
void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt);
 | 
			
		||||
bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt);
 | 
			
		||||
#else
 | 
			
		||||
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
 | 
			
		||||
static inline void kvm_clr_pmu_events(u32 clr) {}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -566,6 +566,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
 | 
			
		|||
{
 | 
			
		||||
	struct kvm_cpu_context *host_ctxt;
 | 
			
		||||
	struct kvm_cpu_context *guest_ctxt;
 | 
			
		||||
	bool pmu_switch_needed;
 | 
			
		||||
	u64 exit_code;
 | 
			
		||||
 | 
			
		||||
	/*
 | 
			
		||||
| 
						 | 
				
			
			@ -585,6 +586,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
 | 
			
		|||
	host_ctxt->__hyp_running_vcpu = vcpu;
 | 
			
		||||
	guest_ctxt = &vcpu->arch.ctxt;
 | 
			
		||||
 | 
			
		||||
	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);
 | 
			
		||||
 | 
			
		||||
	__sysreg_save_state_nvhe(host_ctxt);
 | 
			
		||||
 | 
			
		||||
	__activate_vm(kern_hyp_va(vcpu->kvm));
 | 
			
		||||
| 
						 | 
				
			
			@ -631,6 +634,9 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
 | 
			
		|||
	 */
 | 
			
		||||
	__debug_switch_to_host(vcpu);
 | 
			
		||||
 | 
			
		||||
	if (pmu_switch_needed)
 | 
			
		||||
		__pmu_switch_to_host(host_ctxt);
 | 
			
		||||
 | 
			
		||||
	/* Returning to host will clear PSR.I, remask PMR if needed */
 | 
			
		||||
	if (system_uses_irq_prio_masking())
 | 
			
		||||
		gic_write_pmr(GIC_PRIO_IRQOFF);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -5,6 +5,7 @@
 | 
			
		|||
 */
 | 
			
		||||
#include <linux/kvm_host.h>
 | 
			
		||||
#include <linux/perf_event.h>
 | 
			
		||||
#include <asm/kvm_hyp.h>
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
 * Given the exclude_{host,guest} attributes, determine if we are going
 | 
			
		||||
| 
						 | 
				
			
			@ -43,3 +44,41 @@ void kvm_clr_pmu_events(u32 clr)
 | 
			
		|||
	ctx->pmu_events.events_host &= ~clr;
 | 
			
		||||
	ctx->pmu_events.events_guest &= ~clr;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
 * Disable host events, enable guest events
 | 
			
		||||
 */
 | 
			
		||||
bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
 | 
			
		||||
{
 | 
			
		||||
	struct kvm_host_data *host;
 | 
			
		||||
	struct kvm_pmu_events *pmu;
 | 
			
		||||
 | 
			
		||||
	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
 | 
			
		||||
	pmu = &host->pmu_events;
 | 
			
		||||
 | 
			
		||||
	if (pmu->events_host)
 | 
			
		||||
		write_sysreg(pmu->events_host, pmcntenclr_el0);
 | 
			
		||||
 | 
			
		||||
	if (pmu->events_guest)
 | 
			
		||||
		write_sysreg(pmu->events_guest, pmcntenset_el0);
 | 
			
		||||
 | 
			
		||||
	return (pmu->events_host || pmu->events_guest);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
 * Disable guest events, enable host events
 | 
			
		||||
 */
 | 
			
		||||
void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
 | 
			
		||||
{
 | 
			
		||||
	struct kvm_host_data *host;
 | 
			
		||||
	struct kvm_pmu_events *pmu;
 | 
			
		||||
 | 
			
		||||
	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
 | 
			
		||||
	pmu = &host->pmu_events;
 | 
			
		||||
 | 
			
		||||
	if (pmu->events_guest)
 | 
			
		||||
		write_sysreg(pmu->events_guest, pmcntenclr_el0);
 | 
			
		||||
 | 
			
		||||
	if (pmu->events_host)
 | 
			
		||||
		write_sysreg(pmu->events_host, pmcntenset_el0);
 | 
			
		||||
}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue