Merge tag 'kvm-4.20-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Radim Krčmář:
 "ARM:
   - Improved guest IPA space support (32 to 52 bits)
   - RAS event delivery for 32bit
   - PMU fixes
   - Guest entry hardening
   - Various cleanups
   - Port of dirty_log_test selftest

  PPC:
   - Nested HV KVM support for radix guests on POWER9. The performance
     is much better than with PR KVM. Migration and arbitrary level of
     nesting is supported.
   - Disable nested HV-KVM on early POWER9 chips that need a particular
     hardware bug workaround
   - One VM per core mode to prevent potential data leaks
   - PCI pass-through optimization
   - merge ppc-kvm topic branch and kvm-ppc-fixes to get a better base

  s390:
   - Initial version of AP crypto virtualization via vfio-mdev
   - Improvement for vfio-ap
   - Set the host program identifier
   - Optimize page table locking

  x86:
   - Enable nested virtualization by default
   - Implement Hyper-V IPI hypercalls
   - Improve #PF and #DB handling
   - Allow guests to use Enlightened VMCS
   - Add migration selftests for VMCS and Enlightened VMCS
   - Allow coalesced PIO accesses
   - Add an option to perform nested VMCS host state consistency check
     through hardware
   - Automatic tuning of lapic_timer_advance_ns
   - Many fixes, minor improvements, and cleanups"

* tag 'kvm-4.20-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (204 commits)
  KVM/nVMX: Do not validate that posted_intr_desc_addr is page aligned
  Revert "kvm: x86: optimize dr6 restore"
  KVM: PPC: Optimize clearing TCEs for sparse tables
  x86/kvm/nVMX: tweak shadow fields
  selftests/kvm: add missing executables to .gitignore
  KVM: arm64: Safety check PSTATE when entering guest and handle IL
  KVM: PPC: Book3S HV: Don't use streamlined entry path on early POWER9 chips
  arm/arm64: KVM: Enable 32 bits kvm vcpu events support
  arm/arm64: KVM: Rename function kvm_arch_dev_ioctl_check_extension()
  KVM: arm64: Fix caching of host MDCR_EL2 value
  KVM: VMX: enable nested virtualization by default
  KVM/x86: Use 32bit xor to clear registers in svm.c
  kvm: x86: Introduce KVM_CAP_EXCEPTION_PAYLOAD
  kvm: vmx: Defer setting of DR6 until #DB delivery
  kvm: x86: Defer setting of CR2 until #PF delivery
  kvm: x86: Add payload operands to kvm_multiple_exception
  kvm: x86: Add exception payload fields to kvm_vcpu_events
  kvm: x86: Add has_payload and payload to kvm_queued_exception
  KVM: Documentation: Fix omission in struct kvm_vcpu_events
  KVM: selftests: add Enlightened VMCS test
  ...
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_ARM_H__
#define __ARM64_KVM_ARM_H__

#include <asm/esr.h>
#include <asm/memory.h>
#include <asm/types.h>

/* Hyp Configuration Register (HCR) bits */
#define HCR_FWB		(UL(1) << 46)
#define HCR_TEA		(UL(1) << 37)
#define HCR_TERR	(UL(1) << 36)
#define HCR_TLOR	(UL(1) << 35)
#define HCR_E2H		(UL(1) << 34)
#define HCR_ID		(UL(1) << 33)
#define HCR_CD		(UL(1) << 32)
#define HCR_RW_SHIFT	31
#define HCR_RW		(UL(1) << HCR_RW_SHIFT)
#define HCR_TRVM	(UL(1) << 30)
#define HCR_HCD		(UL(1) << 29)
#define HCR_TDZ		(UL(1) << 28)
#define HCR_TGE		(UL(1) << 27)
#define HCR_TVM		(UL(1) << 26)
#define HCR_TTLB	(UL(1) << 25)
#define HCR_TPU		(UL(1) << 24)
#define HCR_TPC		(UL(1) << 23)
#define HCR_TSW		(UL(1) << 22)
#define HCR_TAC		(UL(1) << 21)
#define HCR_TIDCP	(UL(1) << 20)
#define HCR_TSC		(UL(1) << 19)
#define HCR_TID3	(UL(1) << 18)
#define HCR_TID2	(UL(1) << 17)
#define HCR_TID1	(UL(1) << 16)
#define HCR_TID0	(UL(1) << 15)
#define HCR_TWE		(UL(1) << 14)
#define HCR_TWI		(UL(1) << 13)
#define HCR_DC		(UL(1) << 12)
#define HCR_BSU		(3 << 10)
#define HCR_BSU_IS	(UL(1) << 10)
#define HCR_FB		(UL(1) << 9)
#define HCR_VSE		(UL(1) << 8)
#define HCR_VI		(UL(1) << 7)
#define HCR_VF		(UL(1) << 6)
#define HCR_AMO		(UL(1) << 5)
#define HCR_IMO		(UL(1) << 4)
#define HCR_FMO		(UL(1) << 3)
#define HCR_PTW		(UL(1) << 2)
#define HCR_SWIO	(UL(1) << 1)
#define HCR_VM		(UL(1) << 0)

/*
 * The bits we set in HCR:
 * TLOR:	Trap LORegion register accesses
 * RW:		64bit by default, can be overridden for 32bit VMs
 * TAC:		Trap ACTLR
 * TSC:		Trap SMC
 * TVM:		Trap VM ops (until M+C set in SCTLR_EL1)
 * TSW:		Trap cache operations by set/way
 * TWE:		Trap WFE
 * TWI:		Trap WFI
 * TIDCP:	Trap L2CTLR/L2ECTLR
 * BSU_IS:	Upgrade barriers to the inner shareable domain
 * FB:		Force broadcast of all maintenance operations
 * AMO:		Override CPSR.A and enable signaling with VA
 * IMO:		Override CPSR.I and enable signaling with VI
 * FMO:		Override CPSR.F and enable signaling with VF
 * SWIO:	Turn set/way invalidates into set/way clean+invalidate
 */
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
			 HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
			 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
			 HCR_FMO | HCR_IMO)
#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)

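/*
 * Illustrative sketch, not part of the upstream header: a guest's HCR_EL2 is
 * built from HCR_GUEST_FLAGS and then adjusted per VM, e.g. RW is cleared for
 * a 32-bit guest and bits from HCR_VIRT_EXCP_MASK are set to pend virtual
 * interrupts. The guard macro and helper below are hypothetical.
 */
#ifdef KVM_ARM_H_EXAMPLES
static inline unsigned long example_guest_hcr(int aarch32_guest, int pend_virq)
{
	unsigned long hcr = HCR_GUEST_FLAGS;

	if (aarch32_guest)
		hcr &= ~HCR_RW;		/* EL1 runs AArch32 instead of AArch64 */
	if (pend_virq)
		hcr |= HCR_VI;		/* one of the HCR_VIRT_EXCP_MASK bits */
	return hcr;
}
#endif /* KVM_ARM_H_EXAMPLES */
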
/* TCR_EL2 Registers bits */
#define TCR_EL2_RES1		((1 << 31) | (1 << 23))
#define TCR_EL2_TBI		(1 << 20)
#define TCR_EL2_PS_SHIFT	16
#define TCR_EL2_PS_MASK		(7 << TCR_EL2_PS_SHIFT)
#define TCR_EL2_PS_40B		(2 << TCR_EL2_PS_SHIFT)
#define TCR_EL2_TG0_MASK	TCR_TG0_MASK
#define TCR_EL2_SH0_MASK	TCR_SH0_MASK
#define TCR_EL2_ORGN0_MASK	TCR_ORGN0_MASK
#define TCR_EL2_IRGN0_MASK	TCR_IRGN0_MASK
#define TCR_EL2_T0SZ_MASK	0x3f
#define TCR_EL2_MASK	(TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \
			 TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)

/* VTCR_EL2 Registers bits */
#define VTCR_EL2_RES1		(1 << 31)
#define VTCR_EL2_HD		(1 << 22)
#define VTCR_EL2_HA		(1 << 21)
#define VTCR_EL2_PS_SHIFT	TCR_EL2_PS_SHIFT
#define VTCR_EL2_PS_MASK	TCR_EL2_PS_MASK
#define VTCR_EL2_TG0_MASK	TCR_TG0_MASK
#define VTCR_EL2_TG0_4K		TCR_TG0_4K
#define VTCR_EL2_TG0_16K	TCR_TG0_16K
#define VTCR_EL2_TG0_64K	TCR_TG0_64K
#define VTCR_EL2_SH0_MASK	TCR_SH0_MASK
#define VTCR_EL2_SH0_INNER	TCR_SH0_INNER
#define VTCR_EL2_ORGN0_MASK	TCR_ORGN0_MASK
#define VTCR_EL2_ORGN0_WBWA	TCR_ORGN0_WBWA
#define VTCR_EL2_IRGN0_MASK	TCR_IRGN0_MASK
#define VTCR_EL2_IRGN0_WBWA	TCR_IRGN0_WBWA
#define VTCR_EL2_SL0_SHIFT	6
#define VTCR_EL2_SL0_MASK	(3 << VTCR_EL2_SL0_SHIFT)
#define VTCR_EL2_T0SZ_MASK	0x3f
#define VTCR_EL2_VS_SHIFT	19
#define VTCR_EL2_VS_8BIT	(0 << VTCR_EL2_VS_SHIFT)
#define VTCR_EL2_VS_16BIT	(1 << VTCR_EL2_VS_SHIFT)

#define VTCR_EL2_T0SZ(x)	TCR_T0SZ(x)

/*
 * We configure the Stage-2 page tables to always restrict the IPA space to be
 * 40 bits wide (T0SZ = 24).  Systems with a PARange smaller than 40 bits are
 * not known to exist and will break with this configuration.
 *
 * The VTCR_EL2 is configured per VM and is initialised in kvm_arm_setup_stage2().
 *
 * Note that when using 4K pages, we concatenate two first level page tables
 * together. With 16K pages, we concatenate 16 first level page tables.
 *
 */

#define VTCR_EL2_COMMON_BITS	(VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
				 VTCR_EL2_IRGN0_WBWA | VTCR_EL2_RES1)

/*
 * VTCR_EL2:SL0 indicates the entry level for Stage2 translation.
 * Interestingly, it depends on the page size.
 * See D.10.2.121, VTCR_EL2, in ARM DDI 0487C.a
 *
 *	-----------------------------------------
 *	| Entry level		|  4K  | 16K/64K |
 *	------------------------------------------
 *	| Level: 0		|  2   |   -     |
 *	------------------------------------------
 *	| Level: 1		|  1   |   2     |
 *	------------------------------------------
 *	| Level: 2		|  0   |   1     |
 *	------------------------------------------
 *	| Level: 3		|  -   |   0     |
 *	------------------------------------------
 *
 * The table roughly translates to :
 *
 *	SL0(PAGE_SIZE, Entry_level) = TGRAN_SL0_BASE - Entry_Level
 *
 * Where TGRAN_SL0_BASE is a magic number depending on the page size:
 * 	TGRAN_SL0_BASE(4K) = 2
 *	TGRAN_SL0_BASE(16K) = 3
 *	TGRAN_SL0_BASE(64K) = 3
 * provided we take care of ruling out the unsupported cases and
 * Entry_Level = 4 - Number_of_levels.
 *
 */
#ifdef CONFIG_ARM64_64K_PAGES

#define VTCR_EL2_TGRAN			VTCR_EL2_TG0_64K
#define VTCR_EL2_TGRAN_SL0_BASE		3UL

#elif defined(CONFIG_ARM64_16K_PAGES)

#define VTCR_EL2_TGRAN			VTCR_EL2_TG0_16K
#define VTCR_EL2_TGRAN_SL0_BASE		3UL

#else	/* 4K */

#define VTCR_EL2_TGRAN			VTCR_EL2_TG0_4K
#define VTCR_EL2_TGRAN_SL0_BASE		2UL

#endif

#define VTCR_EL2_LVLS_TO_SL0(levels)	\
	((VTCR_EL2_TGRAN_SL0_BASE - (4 - (levels))) << VTCR_EL2_SL0_SHIFT)
#define VTCR_EL2_SL0_TO_LVLS(sl0)	\
	((sl0) + 4 - VTCR_EL2_TGRAN_SL0_BASE)
#define VTCR_EL2_LVLS(vtcr)		\
	VTCR_EL2_SL0_TO_LVLS(((vtcr) & VTCR_EL2_SL0_MASK) >> VTCR_EL2_SL0_SHIFT)

#define VTCR_EL2_FLAGS			(VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN)
#define VTCR_EL2_IPA(vtcr)		(64 - ((vtcr) & VTCR_EL2_T0SZ_MASK))

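/*
 * Worked example, not part of the upstream header (assumes 4K pages, so
 * VTCR_EL2_TGRAN_SL0_BASE == 2): a 40-bit IPA space uses T0SZ = 64 - 40 = 24
 * and a 3-level stage-2 table starting at level 1, i.e. SL0 = 2 - 1 = 1.
 * The guard macro and helper below are hypothetical.
 */
#ifdef KVM_ARM_H_EXAMPLES
static inline int example_stage2_vtcr_roundtrip(void)
{
	/* Common flags, the SL0 encoding for 3 levels, and T0SZ = 24 (40-bit IPA). */
	unsigned long vtcr = VTCR_EL2_FLAGS | VTCR_EL2_LVLS_TO_SL0(3) | 24;

	/* The decode helpers recover both values from the register image. */
	return VTCR_EL2_LVLS(vtcr) == 3 && VTCR_EL2_IPA(vtcr) == 40;
}
#endif /* KVM_ARM_H_EXAMPLES */
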
/*
 * ARM VMSAv8-64 defines an algorithm for finding the translation table
 * descriptors in section D4.2.8 in ARM DDI 0487C.a.
 *
 * The algorithm defines the expectations on the translation table
 * addresses for each level, based on PAGE_SIZE, entry level
 * and the translation table size (T0SZ). The variable "x" in the
 * algorithm determines the alignment of a table base address at a given
 * level and thus determines the alignment of VTTBR:BADDR for stage2
 * page table entry level.
 * Since the number of bits resolved at the entry level could vary
 * depending on the T0SZ, the value of "x" is defined based on a
 * Magic constant for a given PAGE_SIZE and Entry Level. The
 * intermediate levels must always be aligned to the PAGE_SIZE (i.e.,
 * x = PAGE_SHIFT).
 *
 * The value of "x" for the entry level is calculated as:
 *    x = Magic_N - T0SZ
 *
 * where Magic_N is an integer depending on the page size and the entry
 * level of the page table as below:
 *
 *	--------------------------------------------
 *	| Entry level		|  4K    16K   64K |
 *	--------------------------------------------
 *	| Level: 0 (4 levels)	| 28   |  -  |  -  |
 *	--------------------------------------------
 *	| Level: 1 (3 levels)	| 37   | 31  | 25  |
 *	--------------------------------------------
 *	| Level: 2 (2 levels)	| 46   | 42  | 38  |
 *	--------------------------------------------
 *	| Level: 3 (1 level)	| -    | 53  | 51  |
 *	--------------------------------------------
 *
 * We have a magic formula for the Magic_N below:
 *
 *  Magic_N(PAGE_SIZE, Level) = 64 - ((PAGE_SHIFT - 3) * Number_of_levels)
 *
 * where Number_of_levels = (4 - Level). We are only interested in the
 * value for Entry_Level for the stage2 page table.
 *
 * So, given that T0SZ = (64 - IPA_SHIFT), we can compute 'x' as follows:
 *
 *	x = (64 - ((PAGE_SHIFT - 3) * Number_of_levels)) - (64 - IPA_SHIFT)
 *	  = IPA_SHIFT - ((PAGE_SHIFT - 3) * Number_of_levels)
 *
 * Here is one way to explain the Magic Formula:
 *
 *  x = log2(Size_of_Entry_Level_Table)
 *
 * Since we can resolve (PAGE_SHIFT - 3) bits at each level, and another
 * PAGE_SHIFT bits in the PTE, we have:
 *
 *  Bits_Entry_Level = IPA_SHIFT - ((PAGE_SHIFT - 3) * (n - 1) + PAGE_SHIFT)
 *		     = IPA_SHIFT - (PAGE_SHIFT - 3) * n - 3
 *  where n = number of levels, and since each pointer is 8 bytes, we have:
 *
 *  x = Bits_Entry_Level + 3
 *    = IPA_SHIFT - (PAGE_SHIFT - 3) * n
 *
 * The only constraint here is that we have to find the number of page table
 * levels for a given IPA size (which we do, see stage2_pt_levels()).
 */
#define ARM64_VTTBR_X(ipa, levels)	((ipa) - ((levels) * (PAGE_SHIFT - 3)))

#define VTTBR_CNP_BIT     (UL(1))
#define VTTBR_VMID_SHIFT  (UL(48))
#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)

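/*
 * Worked example, not part of the upstream header (assumes 4K pages, so
 * PAGE_SHIFT == 12): for a 40-bit IPA with 3 levels,
 *
 *	x = ARM64_VTTBR_X(40, 3) = 40 - 3 * (12 - 3) = 13
 *
 * which matches Magic_N(4K, Level 1) - T0SZ = 37 - 24 = 13, so the entry
 * level table is 2^13 bytes (two concatenated 4K level-1 tables) and
 * VTTBR:BADDR must be 8K-aligned. The guard macro and helper below are
 * hypothetical.
 */
#ifdef KVM_ARM_H_EXAMPLES
static inline int example_vttbr_baddr_aligned(u64 pgd_pa)
{
	unsigned int x = ARM64_VTTBR_X(40, 3);	/* 13 with 4K pages */

	/* BADDR for the stage-2 entry level must be 2^x-byte aligned. */
	return (pgd_pa & ((UL(1) << x) - 1)) == 0;
}
#endif /* KVM_ARM_H_EXAMPLES */
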
/* Hyp System Trap Register */
#define HSTR_EL2_T(x)	(1 << x)

/* Hyp Coprocessor Trap Register Shifts */
#define CPTR_EL2_TFP_SHIFT 10

/* Hyp Coprocessor Trap Register */
#define CPTR_EL2_TCPAC	(1 << 31)
#define CPTR_EL2_TTA	(1 << 20)
#define CPTR_EL2_TFP	(1 << CPTR_EL2_TFP_SHIFT)
#define CPTR_EL2_TZ	(1 << 8)
#define CPTR_EL2_RES1	0x000032ff /* known RES1 bits in CPTR_EL2 */
#define CPTR_EL2_DEFAULT	CPTR_EL2_RES1

/* Hyp Debug Configuration Register bits */
#define MDCR_EL2_TPMS		(1 << 14)
#define MDCR_EL2_E2PB_MASK	(UL(0x3))
#define MDCR_EL2_E2PB_SHIFT	(UL(12))
#define MDCR_EL2_TDRA		(1 << 11)
#define MDCR_EL2_TDOSA		(1 << 10)
#define MDCR_EL2_TDA		(1 << 9)
#define MDCR_EL2_TDE		(1 << 8)
#define MDCR_EL2_HPME		(1 << 7)
#define MDCR_EL2_TPM		(1 << 6)
#define MDCR_EL2_TPMCR		(1 << 5)
#define MDCR_EL2_HPMN_MASK	(0x1F)

/* For compatibility with fault code shared with 32-bit */
#define FSC_FAULT	ESR_ELx_FSC_FAULT
#define FSC_ACCESS	ESR_ELx_FSC_ACCESS
#define FSC_PERM	ESR_ELx_FSC_PERM
#define FSC_SEA		ESR_ELx_FSC_EXTABT
#define FSC_SEA_TTW0	(0x14)
#define FSC_SEA_TTW1	(0x15)
#define FSC_SEA_TTW2	(0x16)
#define FSC_SEA_TTW3	(0x17)
#define FSC_SECC	(0x18)
#define FSC_SECC_TTW0	(0x1c)
#define FSC_SECC_TTW1	(0x1d)
#define FSC_SECC_TTW2	(0x1e)
#define FSC_SECC_TTW3	(0x1f)

/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
#define HPFAR_MASK	(~UL(0xf))
/*
 * We have
 *	PAR	[PA_Shift - 1	: 12] = PA	[PA_Shift - 1 : 12]
 *	HPFAR	[PA_Shift - 9	: 4]  = FIPA	[PA_Shift - 1 : 12]
 */
#define PAR_TO_HPFAR(par)		\
	(((par) & GENMASK_ULL(PHYS_MASK_SHIFT - 1, 12)) >> 8)

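/*
 * Worked example, not part of the upstream header: PAR_EL1 carries the PA in
 * bits [PA_Shift-1:12] while HPFAR_EL2 carries the faulting IPA page in bits
 * [PA_Shift-9:4], hence the shift right by 8 above. Shifting the masked HPFAR
 * value left by 8 recovers the IPA page (e.g. HPFAR 0x123450 -> IPA page
 * 0x12345000). The guard macro and helper below are hypothetical.
 */
#ifdef KVM_ARM_H_EXAMPLES
static inline u64 example_hpfar_to_ipa_page(u64 hpfar)
{
	/* Inverse of PAR_TO_HPFAR for the address bits. */
	return (hpfar & HPFAR_MASK) << 8;
}
#endif /* KVM_ARM_H_EXAMPLES */
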
#define kvm_arm_exception_type	\
	{0, "IRQ" }, 		\
	{1, "TRAP" }

#define ECN(x) { ESR_ELx_EC_##x, #x }

#define kvm_arm_exception_class \
	ECN(UNKNOWN), ECN(WFx), ECN(CP15_32), ECN(CP15_64), ECN(CP14_MR), \
	ECN(CP14_LS), ECN(FP_ASIMD), ECN(CP10_ID), ECN(CP14_64), ECN(SVC64), \
	ECN(HVC64), ECN(SMC64), ECN(SYS64), ECN(IMP_DEF), ECN(IABT_LOW), \
	ECN(IABT_CUR), ECN(PC_ALIGN), ECN(DABT_LOW), ECN(DABT_CUR), \
	ECN(SP_ALIGN), ECN(FP_EXC32), ECN(FP_EXC64), ECN(SERROR), \
	ECN(BREAKPT_LOW), ECN(BREAKPT_CUR), ECN(SOFTSTP_LOW), \
	ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
	ECN(BKPT32), ECN(VECTOR32), ECN(BRK64)

#define CPACR_EL1_FPEN		(3 << 20)
#define CPACR_EL1_TTA		(1 << 28)
#define CPACR_EL1_DEFAULT	(CPACR_EL1_FPEN | CPACR_EL1_ZEN_EL1EN)

#endif /* __ARM64_KVM_ARM_H__ */