Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "ARM:
   - Fix alignment of the new HYP sections
   - Fix GICR_TYPER access from userspace

  S390:
   - do not reset the global diag318 data for per-cpu reset
   - do not mark memory as protected too early
   - fix for destroy page ultravisor call

  x86:
   - fix for SEV debugging
   - fix incorrect return code
   - fix for 'noapic' with PIC in userspace and LAPIC in kernel
   - fix for 5-level paging"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm: x86/mmu: Fix get_mmio_spte() on CPUs supporting 5-level PT
  KVM: x86: Fix split-irqchip vs interrupt injection window request
  KVM: x86: handle !lapic_in_kernel case in kvm_cpu_*_extint
  MAINTAINERS: Update email address for Sean Christopherson
  MAINTAINERS: add uv.c also to KVM/s390
  s390/uv: handle destroy page legacy interface
  KVM: arm64: vgic-v3: Drop the reporting of GICR_TYPER.Last for userspace
  KVM: SVM: fix error return code in svm_create_vcpu()
  KVM: SVM: Fix offset computation bug in __sev_dbg_decrypt().
  KVM: arm64: Correctly align nVHE percpu data
  KVM: s390: remove diag318 reset code
  KVM: s390: pv: Mark mm as protected after the set secure parameters and improve cleanup
commit 3913a2bc81
15 changed files with 91 additions and 70 deletions

.mailmap
@@ -290,6 +290,7 @@ Santosh Shilimkar <ssantosh@kernel.org>
 Sarangdhar Joshi <spjoshi@codeaurora.org>
 Sascha Hauer <s.hauer@pengutronix.de>
 S.Çağlar Onur <caglar@pardus.org.tr>
+Sean Christopherson <seanjc@google.com> <sean.j.christopherson@intel.com>
 Sean Nyekjaer <sean@geanix.com> <sean.nyekjaer@prevas.dk>
 Sebastian Reichel <sre@kernel.org> <sebastian.reichel@collabora.co.uk>
 Sebastian Reichel <sre@kernel.org> <sre@debian.org>
MAINTAINERS
@@ -9646,6 +9646,7 @@ F:	Documentation/virt/kvm/s390*
 F:	arch/s390/include/asm/gmap.h
 F:	arch/s390/include/asm/kvm*
 F:	arch/s390/include/uapi/asm/kvm*
+F:	arch/s390/kernel/uv.c
 F:	arch/s390/kvm/
 F:	arch/s390/mm/gmap.c
 F:	tools/testing/selftests/kvm/*/s390x/
@@ -13,6 +13,11 @@
 
 SECTIONS {
 	HYP_SECTION(.text)
+	/*
+	 * .hyp..data..percpu needs to be page aligned to maintain the same
+	 * alignment for when linking into vmlinux.
+	 */
+	. = ALIGN(PAGE_SIZE);
 	HYP_SECTION_NAME(.data..percpu) : {
 		PERCPU_INPUT(L1_CACHE_BYTES)
 	}
@@ -273,6 +273,23 @@ static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
 	return extract_bytes(value, addr & 7, len);
 }
 
+static unsigned long vgic_uaccess_read_v3r_typer(struct kvm_vcpu *vcpu,
+						 gpa_t addr, unsigned int len)
+{
+	unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+	int target_vcpu_id = vcpu->vcpu_id;
+	u64 value;
+
+	value = (u64)(mpidr & GENMASK(23, 0)) << 32;
+	value |= ((target_vcpu_id & 0xffff) << 8);
+
+	if (vgic_has_its(vcpu->kvm))
+		value |= GICR_TYPER_PLPIS;
+
+	/* reporting of the Last bit is not supported for userspace */
+	return extract_bytes(value, addr & 7, len);
+}
+
 static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
 					     gpa_t addr, unsigned int len)
 {
@@ -593,8 +610,9 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
 	REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
 		vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
 		VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
-		vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
+	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_TYPER,
+		vgic_mmio_read_v3r_typer, vgic_mmio_write_wi,
+		vgic_uaccess_read_v3r_typer, vgic_mmio_uaccess_write_wi, 8,
 		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
 		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
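
As an aside, the two GICR_TYPER hunks above route userspace reads through a dedicated handler that never reports GICR_TYPER.Last, so the value no longer depends on vCPU ordering. Below is a minimal user-space sketch of the same bit packing; it is illustrative only (GENMASK64 is a local stand-in for the kernel macro, and the PLPIS/Last bit positions, bits 0 and 4, come from the GICv3 spec rather than from this diff):

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK64(h, l)  (((~0ULL) >> (63 - (h))) & ~((1ULL << (l)) - 1))
    #define GICR_TYPER_PLPIS (1ULL << 0)  /* physical LPIs supported */
    #define GICR_TYPER_LAST  (1ULL << 4)  /* never set by the userspace accessor */

    /* Pack a GICR_TYPER value the way vgic_uaccess_read_v3r_typer() above does. */
    static uint64_t pack_gicr_typer(uint64_t mpidr_aff, uint16_t vcpu_id, int has_its)
    {
        uint64_t value;

        value  = (mpidr_aff & GENMASK64(23, 0)) << 32;  /* affinity in bits [63:32] */
        value |= (uint64_t)(vcpu_id & 0xffff) << 8;     /* processor number, bits [23:8] */

        if (has_its)
            value |= GICR_TYPER_PLPIS;

        /* GICR_TYPER_LAST is deliberately left clear */
        return value;
    }

    int main(void)
    {
        printf("GICR_TYPER = %#llx\n",
               (unsigned long long)pack_gicr_typer(0x0a01, 3, 1));
        return 0;
    }
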
@@ -129,8 +129,15 @@ int uv_destroy_page(unsigned long paddr)
 		.paddr = paddr
 	};
 
-	if (uv_call(0, (u64)&uvcb))
+	if (uv_call(0, (u64)&uvcb)) {
+		/*
+		 * Older firmware uses 107/d as an indication of a non secure
+		 * page. Let us emulate the newer variant (no-op).
+		 */
+		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
+			return 0;
 		return -EINVAL;
+	}
 	return 0;
 }
 
@@ -2312,7 +2312,7 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
 		struct kvm_s390_pv_unp unp = {};
 
 		r = -EINVAL;
-		if (!kvm_s390_pv_is_protected(kvm))
+		if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
 			break;
 
 		r = -EFAULT;
@@ -3564,7 +3564,6 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
 		vcpu->arch.sie_block->pp = 0;
 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
 		vcpu->arch.sie_block->todpr = 0;
-		vcpu->arch.sie_block->cpnc = 0;
 	}
 }
 
@@ -3582,7 +3581,6 @@ static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
 
 	regs->etoken = 0;
 	regs->etoken_extension = 0;
-	regs->diag318 = 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
@@ -208,7 +208,6 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
 		return -EIO;
 	}
 	kvm->arch.gmap->guest_handle = uvcb.guest_handle;
-	atomic_set(&kvm->mm->context.is_protected, 1);
 	return 0;
 }
 
@@ -228,6 +227,8 @@ int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
 	*rrc = uvcb.header.rrc;
 	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
 		     *rc, *rrc);
+	if (!cc)
+		atomic_set(&kvm->mm->context.is_protected, 1);
 	return cc ? -EINVAL : 0;
 }
 
@@ -2690,6 +2690,8 @@ static const struct mm_walk_ops reset_acc_walk_ops = {
 #include <linux/sched/mm.h>
 void s390_reset_acc(struct mm_struct *mm)
 {
+	if (!mm_is_protected(mm))
+		return;
 	/*
 	 * we might be called during
 	 * reset:                             we walk the pages and clear
@@ -1656,6 +1656,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
+int kvm_cpu_has_extint(struct kvm_vcpu *v);
 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
 int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
@@ -40,29 +40,10 @@ static int pending_userspace_extint(struct kvm_vcpu *v)
  * check if there is pending interrupt from
  * non-APIC source without intack.
  */
-static int kvm_cpu_has_extint(struct kvm_vcpu *v)
-{
-	u8 accept = kvm_apic_accept_pic_intr(v);
-
-	if (accept) {
-		if (irqchip_split(v->kvm))
-			return pending_userspace_extint(v);
-		else
-			return v->kvm->arch.vpic->output;
-	} else
-		return 0;
-}
-
-/*
- * check if there is injectable interrupt:
- * when virtual interrupt delivery enabled,
- * interrupt from apic will handled by hardware,
- * we don't need to check it here.
- */
-int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
+int kvm_cpu_has_extint(struct kvm_vcpu *v)
 {
 	/*
-	 * FIXME: interrupt.injected represents an interrupt that it's
+	 * FIXME: interrupt.injected represents an interrupt whose
 	 * side-effects have already been applied (e.g. bit from IRR
 	 * already moved to ISR). Therefore, it is incorrect to rely
 	 * on interrupt.injected to know if there is a pending
@@ -75,6 +56,23 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
 	if (!lapic_in_kernel(v))
 		return v->arch.interrupt.injected;
 
+	if (!kvm_apic_accept_pic_intr(v))
+		return 0;
+
+	if (irqchip_split(v->kvm))
+		return pending_userspace_extint(v);
+	else
+		return v->kvm->arch.vpic->output;
+}
+
+/*
+ * check if there is injectable interrupt:
+ * when virtual interrupt delivery enabled,
+ * interrupt from apic will handled by hardware,
+ * we don't need to check it here.
+ */
+int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
+{
 	if (kvm_cpu_has_extint(v))
 		return 1;
 
@@ -91,20 +89,6 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_injectable_intr);
  */
 int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
 {
-	/*
-	 * FIXME: interrupt.injected represents an interrupt that it's
-	 * side-effects have already been applied (e.g. bit from IRR
-	 * already moved to ISR). Therefore, it is incorrect to rely
-	 * on interrupt.injected to know if there is a pending
-	 * interrupt in the user-mode LAPIC.
-	 * This leads to nVMX/nSVM not be able to distinguish
-	 * if it should exit from L2 to L1 on EXTERNAL_INTERRUPT on
-	 * pending interrupt or should re-inject an injected
-	 * interrupt.
-	 */
-	if (!lapic_in_kernel(v))
-		return v->arch.interrupt.injected;
-
 	if (kvm_cpu_has_extint(v))
 		return 1;
 
@@ -118,16 +102,21 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
  */
 static int kvm_cpu_get_extint(struct kvm_vcpu *v)
 {
-	if (kvm_cpu_has_extint(v)) {
-		if (irqchip_split(v->kvm)) {
-			int vector = v->arch.pending_external_vector;
-
-			v->arch.pending_external_vector = -1;
-			return vector;
-		} else
-			return kvm_pic_read_irq(v->kvm); /* PIC */
-	} else
+	if (!kvm_cpu_has_extint(v)) {
+		WARN_ON(!lapic_in_kernel(v));
 		return -1;
+	}
+
+	if (!lapic_in_kernel(v))
+		return v->arch.interrupt.nr;
+
+	if (irqchip_split(v->kvm)) {
+		int vector = v->arch.pending_external_vector;
+
+		v->arch.pending_external_vector = -1;
+		return vector;
+	} else
+		return kvm_pic_read_irq(v->kvm); /* PIC */
 }
 
 /*
@@ -135,13 +124,7 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
  */
 int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
 {
-	int vector;
-
-	if (!lapic_in_kernel(v))
-		return v->arch.interrupt.nr;
-
-	vector = kvm_cpu_get_extint(v);
-
+	int vector = kvm_cpu_get_extint(v);
 	if (vector != -1)
 		return vector;			/* PIC */
 
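
Taken together, the irq.c hunks above export kvm_cpu_has_extint() and make it, kvm_cpu_get_extint(), kvm_cpu_has_interrupt() and kvm_cpu_get_interrupt() handle the userspace-LAPIC (!lapic_in_kernel) case in one place. A simplified, self-contained model of the resulting decision order is sketched below; plain structs stand in for the KVM vCPU, PIC and split-irqchip state, and the PIC-acceptance (LINT0) check is elided, so this is not kernel code:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_vcpu {
        bool lapic_in_kernel;          /* is the LAPIC emulated in the kernel? */
        bool irqchip_split;            /* PIC/IOAPIC emulated in userspace?    */
        bool userspace_intr_pending;   /* interrupt queued via KVM_INTERRUPT   */
        int  userspace_intr_nr;        /* its vector                           */
        int  pending_external_vector;  /* split-irqchip ExtINT, -1 if none     */
        bool pic_output;               /* in-kernel PIC asserting INTR?        */
        int  pic_vector;               /* vector the in-kernel PIC would give  */
    };

    /* mirrors the reworked kvm_cpu_has_extint() */
    static bool has_extint(const struct fake_vcpu *v)
    {
        if (!v->lapic_in_kernel)
            return v->userspace_intr_pending;
        if (v->irqchip_split)
            return v->pending_external_vector != -1;
        return v->pic_output;
    }

    /* mirrors the reworked kvm_cpu_get_extint() */
    static int get_extint(struct fake_vcpu *v)
    {
        if (!has_extint(v))
            return -1;
        if (!v->lapic_in_kernel)
            return v->userspace_intr_nr;       /* userspace LAPIC case */
        if (v->irqchip_split) {
            int vector = v->pending_external_vector;

            v->pending_external_vector = -1;   /* consume it */
            return vector;
        }
        return v->pic_vector;                  /* in-kernel PIC */
    }

    int main(void)
    {
        struct fake_vcpu v = { .lapic_in_kernel = true, .irqchip_split = true,
                               .pending_external_vector = 0x20 };

        assert(get_extint(&v) == 0x20);  /* split irqchip: take the queued vector */
        assert(get_extint(&v) == -1);    /* ...and it has been consumed */
        printf("ok\n");
        return 0;
    }
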
@@ -2465,7 +2465,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
 	struct kvm_lapic *apic = vcpu->arch.apic;
 	u32 ppr;
 
-	if (!kvm_apic_hw_enabled(apic))
+	if (!kvm_apic_present(vcpu))
 		return -1;
 
 	__apic_update_ppr(apic, &ppr);
@@ -3517,7 +3517,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 {
 	u64 sptes[PT64_ROOT_MAX_LEVEL];
 	struct rsvd_bits_validate *rsvd_check;
-	int root = vcpu->arch.mmu->root_level;
+	int root = vcpu->arch.mmu->shadow_root_level;
 	int leaf;
 	int level;
 	bool reserved = false;
@@ -642,8 +642,8 @@ static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
 	 * Its safe to read more than we are asked, caller should ensure that
 	 * destination has enough space.
 	 */
-	src_paddr = round_down(src_paddr, 16);
 	offset = src_paddr & 15;
+	src_paddr = round_down(src_paddr, 16);
 	sz = round_up(sz + offset, 16);
 
 	return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
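
The fix above only swaps two lines, but the order matters: the page offset has to be captured before src_paddr is rounded down, otherwise offset is always 0 and the decrypt length can stop short of the requested range. A stand-alone illustration (round_down/round_up are re-implemented locally and the addresses are made up):

    #include <stdio.h>

    #define round_down(x, a) ((x) & ~((unsigned long)(a) - 1))
    #define round_up(x, a)   round_down((x) + (a) - 1, (a))

    int main(void)
    {
        unsigned long src_paddr = 0x100c;  /* not 16-byte aligned */
        unsigned long sz = 8;              /* bytes 0x100c..0x1013, crossing a 16-byte boundary */

        /* buggy order: offset computed after rounding is always 0 */
        unsigned long p1   = round_down(src_paddr, 16);
        unsigned long off1 = p1 & 15;
        unsigned long sz1  = round_up(sz + off1, 16);

        /* fixed order: offset taken from the original address first */
        unsigned long off2 = src_paddr & 15;
        unsigned long p2   = round_down(src_paddr, 16);
        unsigned long sz2  = round_up(sz + off2, 16);

        printf("buggy: paddr=%#lx off=%lu len=%lu\n", p1, off1, sz1);  /* len 16: misses 0x1010..0x1013 */
        printf("fixed: paddr=%#lx off=%lu len=%lu\n", p2, off2, sz2);  /* len 32: covers the whole range */
        return 0;
    }
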
@@ -1309,8 +1309,10 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 		svm->avic_is_running = true;
 
 	svm->msrpm = svm_vcpu_alloc_msrpm();
-	if (!svm->msrpm)
+	if (!svm->msrpm) {
+		err = -ENOMEM;
 		goto error_free_vmcb_page;
+	}
 
 	svm_vcpu_init_msrpm(vcpu, svm->msrpm);
 
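
The svm_create_vcpu() change makes the failing branch set an explicit error code before jumping to the cleanup label; without it, err still holds 0 from the preceding successful step and the caller would see success. A schematic of that pattern in plain C (the struct, helper and sizes here are invented for illustration):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct vcpu_like { void *vmcb_page; void *msrpm; };

    /* stands in for an earlier allocation step that succeeds, leaving err == 0 */
    static int alloc_vmcb_page(struct vcpu_like *v)
    {
        v->vmcb_page = malloc(4096);
        return v->vmcb_page ? 0 : -ENOMEM;
    }

    static int create_vcpu_like(struct vcpu_like *v)
    {
        int err;

        err = alloc_vmcb_page(v);
        if (err)
            return err;

        v->msrpm = NULL;   /* pretend the MSR-bitmap allocation failed */
        if (!v->msrpm) {
            err = -ENOMEM; /* the line the fix adds: without it err is still 0 */
            goto error_free_vmcb_page;
        }
        return 0;

    error_free_vmcb_page:
        free(v->vmcb_page);
        return err;
    }

    int main(void)
    {
        struct vcpu_like v = { 0 };

        printf("create_vcpu_like() = %d\n", create_vcpu_like(&v));  /* prints -12, not 0 */
        return 0;
    }
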
@@ -4051,21 +4051,23 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
 
 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
 {
+	/*
+	 * We can accept userspace's request for interrupt injection
+	 * as long as we have a place to store the interrupt number.
+	 * The actual injection will happen when the CPU is able to
+	 * deliver the interrupt.
+	 */
+	if (kvm_cpu_has_extint(vcpu))
+		return false;
+
+	/* Acknowledging ExtINT does not happen if LINT0 is masked.  */
 	return (!lapic_in_kernel(vcpu) ||
 		kvm_apic_accept_pic_intr(vcpu));
 }
 
-/*
- * if userspace requested an interrupt window, check that the
- * interrupt window is open.
- *
- * No need to exit to userspace if we already have an interrupt queued.
- */
 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
 {
 	return kvm_arch_interrupt_allowed(vcpu) &&
-		!kvm_cpu_has_interrupt(vcpu) &&
-		!kvm_event_needs_reinjection(vcpu) &&
 		kvm_cpu_accept_dm_intr(vcpu);
 }
 