forked from mirrors/linux
		
	We still have a stray quicklist header included even though we axed quicklist usage quite a while back. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> LKML-Reference: <201005241913.o4OJDJe9010881@imap1.linux-foundation.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
		
			
				
	
	
		
			134 lines
		
	
	
	
		
			3.2 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			134 lines
		
	
	
	
		
			3.2 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
#include <linux/sched.h>
 | 
						|
#include <linux/kernel.h>
 | 
						|
#include <linux/errno.h>
 | 
						|
#include <linux/mm.h>
 | 
						|
#include <linux/nmi.h>
 | 
						|
#include <linux/swap.h>
 | 
						|
#include <linux/smp.h>
 | 
						|
#include <linux/highmem.h>
 | 
						|
#include <linux/pagemap.h>
 | 
						|
#include <linux/spinlock.h>
 | 
						|
#include <linux/module.h>
 | 
						|
 | 
						|
#include <asm/system.h>
 | 
						|
#include <asm/pgtable.h>
 | 
						|
#include <asm/pgalloc.h>
 | 
						|
#include <asm/fixmap.h>
 | 
						|
#include <asm/e820.h>
 | 
						|
#include <asm/tlb.h>
 | 
						|
#include <asm/tlbflush.h>
 | 
						|
#include <asm/io.h>
 | 
						|
 | 
						|
unsigned int __VMALLOC_RESERVE = 128 << 20;
 | 
						|
 | 
						|
/*
 | 
						|
 * Associate a virtual page frame with a given physical page frame 
 | 
						|
 * and protection flags for that frame.
 | 
						|
 */ 
 | 
						|
/*
 * Install (or clear) the kernel PTE for @vaddr in swapper_pg_dir.
 *
 * The pgd/pud/pmd levels covering @vaddr must already be populated;
 * a missing level is a kernel bug.  A non-zero @pteval is written into
 * the PTE slot, a zero @pteval clears the entry.  Ends with a
 * single-page TLB flush, which also covers PGE (global) mappings.
 */
void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd = swapper_pg_dir + pgd_index(vaddr);

	if (pgd_none(*pgd)) {
		BUG();
		return;
	}

	pud_t *pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}

	pmd_t *pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}

	pte_t *pte = pte_offset_kernel(pmd, vaddr);

	/* A zero pteval means "remove the mapping" rather than "map page 0". */
	if (pte_val(pteval))
		set_pte_at(&init_mm, vaddr, pte, pteval);
	else
		pte_clear(&init_mm, vaddr, pte);

	/* Flushing just this one page is sufficient (PGE entries included). */
	__flush_tlb_one(vaddr);
}
 | 
						|
 | 
						|
/*
 | 
						|
 * Associate a large virtual page frame with a given physical page frame 
 | 
						|
 * and protection flags for that frame. pfn is for the base of the page,
 | 
						|
 * vaddr is what the page gets mapped to - both must be properly aligned. 
 | 
						|
 * The pmd must already be instantiated. Assumes PAE mode.
 | 
						|
 */ 
 | 
						|
/*
 * Map a large (PMD-sized) page: point the PMD entry covering @vaddr at
 * the physical page starting at @pfn with protection @flags.
 *
 * Both @vaddr and @pfn must be aligned to the large-page size, and the
 * pgd entry must already exist; violations are reported with a warning
 * and the request is silently dropped.  Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	/* The virtual address must sit on a PMD boundary... */
	if (vaddr & (PMD_SIZE-1)) {
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	/* ...and the physical frame on a large-page boundary. */
	if (pfn & (PTRS_PER_PTE-1)) {
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}

	pgd_t *pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}

	pud_t *pud = pud_offset(pgd, vaddr);
	pmd_t *pmd = pmd_offset(pud, vaddr);

	set_pmd(pmd, pfn_pmd(pfn, flags));

	/* Flushing just this one page is sufficient (PGE entries included). */
	__flush_tlb_one(vaddr);
}
 | 
						|
 | 
						|
unsigned long __FIXADDR_TOP = 0xfffff000;
 | 
						|
/* Exported so modules can reference the (runtime-adjustable) fixmap top. */
EXPORT_SYMBOL(__FIXADDR_TOP);
 | 
						|
 | 
						|
/*
 | 
						|
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 | 
						|
 * bytes. This can be used to increase (or decrease) the
 | 
						|
 * vmalloc area - the default is 128m.
 | 
						|
 */
 | 
						|
static int __init parse_vmalloc(char *arg)
 | 
						|
{
 | 
						|
	if (!arg)
 | 
						|
		return -EINVAL;
 | 
						|
 | 
						|
	/* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole*/
 | 
						|
	__VMALLOC_RESERVE = memparse(arg, &arg) + VMALLOC_OFFSET;
 | 
						|
	return 0;
 | 
						|
}
 | 
						|
early_param("vmalloc", parse_vmalloc);
 | 
						|
 | 
						|
/*
 | 
						|
 * reservetop=size reserves a hole at the top of the kernel address space which
 | 
						|
 * a hypervisor can load into later.  Needed for dynamically loaded hypervisors,
 | 
						|
 * so relocating the fixmap can be done before paging initialization.
 | 
						|
 */
 | 
						|
static int __init parse_reservetop(char *arg)
 | 
						|
{
 | 
						|
	unsigned long address;
 | 
						|
 | 
						|
	if (!arg)
 | 
						|
		return -EINVAL;
 | 
						|
 | 
						|
	address = memparse(arg, &arg);
 | 
						|
	reserve_top_address(address);
 | 
						|
	fixup_early_ioremap();
 | 
						|
	return 0;
 | 
						|
}
 | 
						|
early_param("reservetop", parse_reservetop);
 |