Mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 10:40:15 +02:00)
	arm64: KVM: Fix stage-2 PGD allocation to have per-page refcounting
We're using __get_free_pages to allocate the guest's stage-2
PGD. The standard behaviour of this function is to return a set of
pages where only the head page has a valid refcount.
This behaviour gets us into trouble when we're trying to increment
the refcount on a non-head page:
page:ffff7c00cfb693c0 count:0 mapcount:0 mapping:          (null) index:0x0
flags: 0x4000000000000000()
page dumped because: VM_BUG_ON_PAGE((*({ __attribute__((unused)) typeof((&page->_count)->counter) __var = ( typeof((&page->_count)->counter)) 0; (volatile typeof((&page->_count)->counter) *)&((&page->_count)->counter); })) <= 0)
BUG: failure at include/linux/mm.h:548/get_page()!
Kernel panic - not syncing: BUG!
CPU: 1 PID: 1695 Comm: kvm-vcpu-0 Not tainted 4.0.0-rc1+ #3825
Hardware name: APM X-Gene Mustang board (DT)
Call trace:
[<ffff80000008a09c>] dump_backtrace+0x0/0x13c
[<ffff80000008a1e8>] show_stack+0x10/0x1c
[<ffff800000691da8>] dump_stack+0x74/0x94
[<ffff800000690d78>] panic+0x100/0x240
[<ffff8000000a0bc4>] stage2_get_pmd+0x17c/0x2bc
[<ffff8000000a1dc4>] kvm_handle_guest_abort+0x4b4/0x6b0
[<ffff8000000a420c>] handle_exit+0x58/0x180
[<ffff80000009e7a4>] kvm_arch_vcpu_ioctl_run+0x114/0x45c
[<ffff800000099df4>] kvm_vcpu_ioctl+0x2e0/0x754
[<ffff8000001c0a18>] do_vfs_ioctl+0x424/0x5c8
[<ffff8000001c0bfc>] SyS_ioctl+0x40/0x78
CPU0: stopping
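To make the failure concrete, here is a hedged, illustration-only sketch (not code from this patch; the function name is made up, and it assumes CONFIG_DEBUG_VM so the VM_BUG_ON_PAGE() check in get_page() is compiled in). An order-1 allocation from __get_free_pages() returns two contiguous pages, but only the head page has its _count initialised, so taking a reference on the second page trips the BUG shown above:

/* Illustration only: a multi-page table allocated with __get_free_pages()
 * cannot have its non-head pages refcounted. */
#include <linux/gfp.h>
#include <linux/mm.h>

static void demo_tail_page_refcount_bug(void)
{
	/* order 1 = two contiguous pages; only the first has _count == 1 */
	unsigned long addr = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	struct page *head;

	if (!addr)
		return;

	head = virt_to_page((void *)addr);
	get_page(head);		/* fine: _count goes 1 -> 2 */
	get_page(head + 1);	/* _count is 0 -> VM_BUG_ON_PAGE() fires */
}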
A possible approach for this is to split the compound page using
split_page() at allocation time, and change the teardown path to
free one page at a time.  It turns out that alloc_pages_exact() and
free_pages_exact() do exactly that.
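In condensed form, the helpers this patch adds to mmu.c look roughly like the sketch below (kvm_get_hwpgd_size() is introduced by the patch as well; see the diff further down for the exact version):

/* Sketch of the approach: alloc_pages_exact() split_page()s the underlying
 * higher-order allocation, so every page in the range carries its own
 * refcount and can be get_page()'d / put_page()'d independently. */
static void *kvm_alloc_hwpgd(void)
{
	return alloc_pages_exact(kvm_get_hwpgd_size(),
				 GFP_KERNEL | __GFP_ZERO);
}

static void kvm_free_hwpgd(void *hwpgd)
{
	/* releases the range one page at a time */
	free_pages_exact(hwpgd, kvm_get_hwpgd_size());
}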
While we're at it, the PGD allocation code is reworked to reduce
duplication.
This has been tested on an X-Gene platform with a 4kB/48bit-VA host
kernel, and kvmtool hacked to place memory in the second page of
the hardware PGD (PUD for the host kernel). Also regression-tested
on a Cubietruck (Cortex-A7).
 [ Reworked to use alloc_pages_exact() and free_pages_exact() and to
   return pointers directly instead of by reference as arguments
    - Christoffer ]
Reported-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
			
			
parent bfb8fb4775
commit a987370f8e
3 changed files with 58 additions and 67 deletions
@@ -162,18 +162,16 @@ static inline bool kvm_page_empty(void *ptr)
 
 #define KVM_PREALLOC_LEVEL	0
 
-static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
-{
-	return 0;
-}
-
-static inline void kvm_free_hwpgd(struct kvm *kvm) { }
-
 static inline void *kvm_get_hwpgd(struct kvm *kvm)
 {
 	return kvm->arch.pgd;
 }
 
+static inline unsigned int kvm_get_hwpgd_size(void)
+{
+	return PTRS_PER_S2_PGD * sizeof(pgd_t);
+}
+
 struct kvm;
 
 #define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
@@ -632,6 +632,20 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
 				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
 }
 
+/* Free the HW pgd, one page at a time */
+static void kvm_free_hwpgd(void *hwpgd)
+{
+	free_pages_exact(hwpgd, kvm_get_hwpgd_size());
+}
+
+/* Allocate the HW PGD, making sure that each page gets its own refcount */
+static void *kvm_alloc_hwpgd(void)
+{
+	unsigned int size = kvm_get_hwpgd_size();
+
+	return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
+}
+
 /**
  * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
  * @kvm:	The KVM struct pointer for the VM.
@@ -645,15 +659,31 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
  */
 int kvm_alloc_stage2_pgd(struct kvm *kvm)
 {
-	int ret;
 	pgd_t *pgd;
+	void *hwpgd;
 
 	if (kvm->arch.pgd != NULL) {
 		kvm_err("kvm_arch already initialized?\n");
 		return -EINVAL;
 	}
 
+	hwpgd = kvm_alloc_hwpgd();
+	if (!hwpgd)
+		return -ENOMEM;
+
+	/* When the kernel uses more levels of page tables than the
+	 * guest, we allocate a fake PGD and pre-populate it to point
+	 * to the next-level page table, which will be the real
+	 * initial page table pointed to by the VTTBR.
+	 *
+	 * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
+	 * the PMD and the kernel will use folded pud.
+	 * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
+	 * pages.
+	 */
 	if (KVM_PREALLOC_LEVEL > 0) {
+		int i;
+
 		/*
 		 * Allocate fake pgd for the page table manipulation macros to
 		 * work.  This is not used by the hardware and we have no
@@ -661,30 +691,32 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 		 */
 		pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
 				       GFP_KERNEL | __GFP_ZERO);
+
+		if (!pgd) {
+			kvm_free_hwpgd(hwpgd);
+			return -ENOMEM;
+		}
+
+		/* Plug the HW PGD into the fake one. */
+		for (i = 0; i < PTRS_PER_S2_PGD; i++) {
+			if (KVM_PREALLOC_LEVEL == 1)
+				pgd_populate(NULL, pgd + i,
+					     (pud_t *)hwpgd + i * PTRS_PER_PUD);
+			else if (KVM_PREALLOC_LEVEL == 2)
+				pud_populate(NULL, pud_offset(pgd, 0) + i,
+					     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
+		}
 	} else {
 		/*
 		 * Allocate actual first-level Stage-2 page table used by the
 		 * hardware for Stage-2 page table walks.
 		 */
-		pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
+		pgd = (pgd_t *)hwpgd;
 	}
 
-	if (!pgd)
-		return -ENOMEM;
-
-	ret = kvm_prealloc_hwpgd(kvm, pgd);
-	if (ret)
-		goto out_err;
-
 	kvm_clean_pgd(pgd);
 	kvm->arch.pgd = pgd;
 	return 0;
-out_err:
-	if (KVM_PREALLOC_LEVEL > 0)
-		kfree(pgd);
-	else
-		free_pages((unsigned long)pgd, S2_PGD_ORDER);
-	return ret;
 }
 
 /**
@@ -785,11 +817,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 		return;
 
 	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
-	kvm_free_hwpgd(kvm);
+	kvm_free_hwpgd(kvm_get_hwpgd(kvm));
 	if (KVM_PREALLOC_LEVEL > 0)
 		kfree(kvm->arch.pgd);
-	else
-		free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
+
 	kvm->arch.pgd = NULL;
 }
 
@@ -171,43 +171,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 #define KVM_PREALLOC_LEVEL	(0)
 #endif
 
-/**
- * kvm_prealloc_hwpgd - allocate inital table for VTTBR
- * @kvm:	The KVM struct pointer for the VM.
- * @pgd:	The kernel pseudo pgd
- *
- * When the kernel uses more levels of page tables than the guest, we allocate
- * a fake PGD and pre-populate it to point to the next-level page table, which
- * will be the real initial page table pointed to by the VTTBR.
- *
- * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and
- * the kernel will use folded pud.  When KVM_PREALLOC_LEVEL==1, we
- * allocate 2 consecutive PUD pages.
- */
-static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
-{
-	unsigned int i;
-	unsigned long hwpgd;
-
-	if (KVM_PREALLOC_LEVEL == 0)
-		return 0;
-
-	hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT);
-	if (!hwpgd)
-		return -ENOMEM;
-
-	for (i = 0; i < PTRS_PER_S2_PGD; i++) {
-		if (KVM_PREALLOC_LEVEL == 1)
-			pgd_populate(NULL, pgd + i,
-				     (pud_t *)hwpgd + i * PTRS_PER_PUD);
-		else if (KVM_PREALLOC_LEVEL == 2)
-			pud_populate(NULL, pud_offset(pgd, 0) + i,
-				     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
-	}
-
-	return 0;
-}
-
 static inline void *kvm_get_hwpgd(struct kvm *kvm)
 {
 	pgd_t *pgd = kvm->arch.pgd;
@@ -224,12 +187,11 @@ static inline void *kvm_get_hwpgd(struct kvm *kvm)
 	return pmd_offset(pud, 0);
 }
 
-static inline void kvm_free_hwpgd(struct kvm *kvm)
+static inline unsigned int kvm_get_hwpgd_size(void)
 {
-	if (KVM_PREALLOC_LEVEL > 0) {
-		unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm);
-		free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT);
-	}
+	if (KVM_PREALLOC_LEVEL > 0)
+		return PTRS_PER_S2_PGD * PAGE_SIZE;
+	return PTRS_PER_S2_PGD * sizeof(pgd_t);
 }
 
 static inline bool kvm_page_empty(void *ptr)