powerpc: Merge lmb.c and make MM initialization use it.

This also creates merged versions of do_init_bootmem, paging_init and
mem_init and moves them to arch/powerpc/mm/mem.c.  It gets rid of the
mem_pieces stuff.

I made memory_limit a parameter to lmb_enforce_memory_limit rather than
a global referenced by that function.  This will require some small
changes to ppc64 if we want to continue building ARCH=ppc64 using the
merged lmb.c.

Signed-off-by: Paul Mackerras <paulus@samba.org>
This commit is contained in:

    parent 9b6b563c0d
    commit 7c8c6b9776

11 changed files with 519 additions and 706 deletions
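
The interface change called out in the commit message shows up in the new
arch/powerpc/mm/lmb.c below: lmb_enforce_memory_limit() now takes the limit
as an argument instead of reading a global. A minimal sketch of a caller,
assuming a memory_limit variable filled in elsewhere (the call site and
variable are illustrative; only the new signature is taken from this diff):

	extern unsigned long memory_limit;	/* e.g. parsed from "mem=" */

	lmb_enforce_memory_limit(memory_limit);	/* truncate the region list */
	lmb_analyze();	/* recompute lmb.memory.size, per the comment in lmb.c */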
@@ -2,9 +2,9 @@
 # Makefile for the linux ppc-specific parts of the memory manager.
 #
 
-obj-y				:= fault.o mem.o
+obj-y				:= fault.o mem.o lmb.o
 obj-$(CONFIG_PPC32)		+= init.o pgtable.o mmu_context.o \
-				   mem_pieces.o tlb.o
+				   tlb.o
 obj-$(CONFIG_PPC64)		+= init64.o pgtable64.o mmu_context64.o
 obj-$(CONFIG_PPC_STD_MMU_32)	+= ppc_mmu.o hash_32.o
 obj-$(CONFIG_40x)		+= 4xx_mmu.o
@@ -45,8 +45,9 @@
 #include <asm/tlb.h>
 #include <asm/bootinfo.h>
 #include <asm/prom.h>
+#include <asm/lmb.h>
+#include <asm/sections.h>
 
-#include "mem_pieces.h"
 #include "mmu_decl.h"
 
 #if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
@@ -65,17 +66,11 @@ unsigned long total_lowmem;
 unsigned long ppc_memstart;
 unsigned long ppc_memoffset = PAGE_OFFSET;
 
-int mem_init_done;
-int init_bootmem_done;
-int boot_mapsize;
 #ifdef CONFIG_PPC_PMAC
 unsigned long agp_special_page;
 #endif
 
-extern char _end[];
-extern char etext[], _stext[];
-extern char __init_begin, __init_end;
 
 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
 pgprot_t kmap_prot;
@@ -85,15 +80,15 @@ EXPORT_SYMBOL(kmap_pte);
 #endif
 
 void MMU_init(void);
-void set_phys_avail(unsigned long total_ram);
 
 /* XXX should be in current.h  -- paulus */
 extern struct task_struct *current_set[NR_CPUS];
 
 char *klimit = _end;
-struct mem_pieces phys_avail;
 struct device_node *memory_node;
 
+extern int init_bootmem_done;
+
 /*
  * this tells the system to map all of ram with the segregs
  * (i.e. page tables) instead of the bats.
@@ -102,84 +97,14 @@ struct device_node *memory_node;
 int __map_without_bats;
 int __map_without_ltlbs;
 
 /* max amount of RAM to use */
 unsigned long __max_memory;
 /* max amount of low RAM to map in */
 unsigned long __max_low_memory = MAX_LOW_MEM;
 
 /*
- * Read in a property describing some pieces of memory.
+ * limit of what is accessible with initial MMU setup -
+ * 256MB usually, but only 16MB on 601.
  */
-static int __init get_mem_prop(char *name, struct mem_pieces *mp)
-{
-	struct reg_property *rp;
-	int i, s;
-	unsigned int *ip;
-	int nac = prom_n_addr_cells(memory_node);
-	int nsc = prom_n_size_cells(memory_node);
-
-	ip = (unsigned int *) get_property(memory_node, name, &s);
-	if (ip == NULL) {
-		printk(KERN_ERR "error: couldn't get %s property on /memory\n",
-		       name);
-		return 0;
-	}
-	s /= (nsc + nac) * 4;
-	rp = mp->regions;
-	for (i = 0; i < s; ++i, ip += nac+nsc) {
-		if (nac >= 2 && ip[nac-2] != 0)
-			continue;
-		rp->address = ip[nac-1];
-		if (nsc >= 2 && ip[nac+nsc-2] != 0)
-			rp->size = ~0U;
-		else
-			rp->size = ip[nac+nsc-1];
-		++rp;
-	}
-	mp->n_regions = rp - mp->regions;
-
-	/* Make sure the pieces are sorted. */
-	mem_pieces_sort(mp);
-	mem_pieces_coalesce(mp);
-	return 1;
-}
-
-/*
- * Collect information about physical RAM and which pieces are
- * already in use from the device tree.
- */
-unsigned long __init find_end_of_memory(void)
-{
-	unsigned long a, total;
-	struct mem_pieces phys_mem;
-
-	/*
-	 * Find out where physical memory is, and check that it
-	 * starts at 0 and is contiguous.  It seems that RAM is
-	 * always physically contiguous on Power Macintoshes.
-	 *
-	 * Supporting discontiguous physical memory isn't hard,
-	 * it just makes the virtual <-> physical mapping functions
-	 * more complicated (or else you end up wasting space
-	 * in mem_map).
-	 */
-	memory_node = find_devices("memory");
-	if (memory_node == NULL || !get_mem_prop("reg", &phys_mem)
-	    || phys_mem.n_regions == 0)
-		panic("No RAM??");
-	a = phys_mem.regions[0].address;
-	if (a != 0)
-		panic("RAM doesn't start at physical address 0");
-	total = phys_mem.regions[0].size;
-
-	if (phys_mem.n_regions > 1) {
-		printk("RAM starting at 0x%x is not contiguous\n",
-		       phys_mem.regions[1].address);
-		printk("Using RAM from 0 to 0x%lx\n", total-1);
-	}
-
-	return total;
-}
+unsigned long __initial_memory_limit = 0x10000000;
 
 /*
  * Check for command-line options that affect what MMU_init will do.
@@ -194,27 +119,6 @@ void MMU_setup(void)
 	if (strstr(cmd_line, "noltlbs")) {
 		__map_without_ltlbs = 1;
 	}
-
-	/* Look for mem= option on command line */
-	if (strstr(cmd_line, "mem=")) {
-		char *p, *q;
-		unsigned long maxmem = 0;
-
-		for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) {
-			q = p + 4;
-			if (p > cmd_line && p[-1] != ' ')
-				continue;
-			maxmem = simple_strtoul(q, &q, 0);
-			if (*q == 'k' || *q == 'K') {
-				maxmem <<= 10;
-				++q;
-			} else if (*q == 'm' || *q == 'M') {
-				maxmem <<= 20;
-				++q;
-			}
-		}
-		__max_memory = maxmem;
-	}
 }
 
 /*
@@ -227,23 +131,22 @@ void __init MMU_init(void)
 	if (ppc_md.progress)
 		ppc_md.progress("MMU:enter", 0x111);
 
+	/* 601 can only access 16MB at the moment */
+	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
+		__initial_memory_limit = 0x01000000;
+
 	/* parse args from command line */
 	MMU_setup();
 
-	/*
-	 * Figure out how much memory we have, how much
-	 * is lowmem, and how much is highmem.  If we were
-	 * passed the total memory size from the bootloader,
-	 * just use it.
-	 */
-	if (boot_mem_size)
-		total_memory = boot_mem_size;
-	else
-		total_memory = find_end_of_memory();
+	if (lmb.memory.cnt > 1) {
+		lmb.memory.cnt = 1;
+		lmb_analyze();
+		printk(KERN_WARNING "Only using first contiguous memory region");
+	}
 
-	if (__max_memory && total_memory > __max_memory)
-		total_memory = __max_memory;
+	total_memory = lmb_end_of_DRAM();
 	total_lowmem = total_memory;
 
 #ifdef CONFIG_FSL_BOOKE
	/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
	 * entries, so we need to adjust lowmem to match the amount we can map
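
The replacement logic above trusts the lmb region list built from the device
tree instead of probing memory itself, and it deliberately falls back to the
first contiguous block when more than one region is present. A sketch of the
effect, with invented region values:

	/*
	 * Before: lmb.memory.cnt == 2
	 *   region[0] = { base 0x00000000, size 0x08000000 }   128MB
	 *   region[1] = { base 0x10000000, size 0x08000000 }   128MB
	 * After "lmb.memory.cnt = 1; lmb_analyze();":
	 *   lmb.memory.size == 0x08000000, and
	 *   total_memory = lmb_end_of_DRAM() == 0x08000000.
	 */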
@@ -256,7 +159,6 @@ void __init MMU_init(void)
 		total_memory = total_lowmem;
 #endif /* CONFIG_HIGHMEM */
 	}
-	set_phys_avail(total_lowmem);
 
 	/* Initialize the MMU hardware */
 	if (ppc_md.progress)
@@ -303,7 +205,8 @@ void __init *early_get_page(void)
 	if (init_bootmem_done) {
 		p = alloc_bootmem_pages(PAGE_SIZE);
 	} else {
-		p = mem_pieces_find(PAGE_SIZE, PAGE_SIZE);
+		p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
+					__initial_memory_limit));
 	}
 	return p;
 }
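
Note the pattern in the replacement lines: lmb_alloc_base() hands back a
physical address, which must be converted before use as a pointer. A minimal
sketch of the same idiom outside this function (values are just examples):

	unsigned long pa;
	void *p;

	/* one page, page-aligned, below the initial MMU mapping limit */
	pa = lmb_alloc_base(PAGE_SIZE, PAGE_SIZE, __initial_memory_limit);
	p = __va(pa);		/* kernel virtual address of that page */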
@@ -353,229 +256,3 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 	}
 }
 #endif
-
-/*
- * Initialize the bootmem system and give it all the memory we
- * have available.
- */
-void __init do_init_bootmem(void)
-{
-	unsigned long start, size;
-	int i;
-
-	/*
-	 * Find an area to use for the bootmem bitmap.
-	 * We look for the first area which is at least
-	 * 128kB in length (128kB is enough for a bitmap
-	 * for 4GB of memory, using 4kB pages), plus 1 page
-	 * (in case the address isn't page-aligned).
-	 */
-	start = 0;
-	size = 0;
-	for (i = 0; i < phys_avail.n_regions; ++i) {
-		unsigned long a = phys_avail.regions[i].address;
-		unsigned long s = phys_avail.regions[i].size;
-		if (s <= size)
-			continue;
-		start = a;
-		size = s;
-		if (s >= 33 * PAGE_SIZE)
-			break;
-	}
-	start = PAGE_ALIGN(start);
-
-	min_low_pfn = start >> PAGE_SHIFT;
-	max_low_pfn = (PPC_MEMSTART + total_lowmem) >> PAGE_SHIFT;
-	max_pfn = (PPC_MEMSTART + total_memory) >> PAGE_SHIFT;
-	boot_mapsize = init_bootmem_node(&contig_page_data, min_low_pfn,
-					 PPC_MEMSTART >> PAGE_SHIFT,
-					 max_low_pfn);
-
-	/* remove the bootmem bitmap from the available memory */
-	mem_pieces_remove(&phys_avail, start, boot_mapsize, 1);
-
-	/* add everything in phys_avail into the bootmem map */
-	for (i = 0; i < phys_avail.n_regions; ++i)
-		free_bootmem(phys_avail.regions[i].address,
-			     phys_avail.regions[i].size);
-
-	init_bootmem_done = 1;
-}
-
-/*
- * paging_init() sets up the page tables - in fact we've already done this.
- */
-void __init paging_init(void)
-{
-	unsigned long zones_size[MAX_NR_ZONES], i;
-
-#ifdef CONFIG_HIGHMEM
-	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
-	pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
-			(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
-	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
-	kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
-			(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
-	kmap_prot = PAGE_KERNEL;
-#endif /* CONFIG_HIGHMEM */
-
-	/*
-	 * All pages are DMA-able so we put them all in the DMA zone.
-	 */
-	zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
-	for (i = 1; i < MAX_NR_ZONES; i++)
-		zones_size[i] = 0;
-
-#ifdef CONFIG_HIGHMEM
-	zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
-#endif /* CONFIG_HIGHMEM */
-
-	free_area_init(zones_size);
-}
-
-void __init mem_init(void)
-{
-	unsigned long addr;
-	int codepages = 0;
-	int datapages = 0;
-	int initpages = 0;
-#ifdef CONFIG_HIGHMEM
-	unsigned long highmem_mapnr;
-
-	highmem_mapnr = total_lowmem >> PAGE_SHIFT;
-#endif /* CONFIG_HIGHMEM */
-	max_mapnr = total_memory >> PAGE_SHIFT;
-
-	high_memory = (void *) __va(PPC_MEMSTART + total_lowmem);
-	num_physpages = max_mapnr;	/* RAM is assumed contiguous */
-
-	totalram_pages += free_all_bootmem();
-
-#ifdef CONFIG_BLK_DEV_INITRD
-	/* if we are booted from BootX with an initial ramdisk,
-	   make sure the ramdisk pages aren't reserved. */
-	if (initrd_start) {
-		for (addr = initrd_start; addr < initrd_end; addr += PAGE_SIZE)
-			ClearPageReserved(virt_to_page(addr));
-	}
-#endif /* CONFIG_BLK_DEV_INITRD */
-
-#ifdef CONFIG_PPC_OF
-	/* mark the RTAS pages as reserved */
-	if ( rtas_data )
-		for (addr = (ulong)__va(rtas_data);
-		     addr < PAGE_ALIGN((ulong)__va(rtas_data)+rtas_size) ;
-		     addr += PAGE_SIZE)
-			SetPageReserved(virt_to_page(addr));
-#endif
-#ifdef CONFIG_PPC_PMAC
-	if (agp_special_page)
-		SetPageReserved(virt_to_page(agp_special_page));
-#endif
-	for (addr = PAGE_OFFSET; addr < (unsigned long)high_memory;
-	     addr += PAGE_SIZE) {
-		if (!PageReserved(virt_to_page(addr)))
-			continue;
-		if (addr < (ulong) etext)
-			codepages++;
-		else if (addr >= (unsigned long)&__init_begin
-			 && addr < (unsigned long)&__init_end)
-			initpages++;
-		else if (addr < (ulong) klimit)
-			datapages++;
-	}
-
-#ifdef CONFIG_HIGHMEM
-	{
-		unsigned long pfn;
-
-		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
-			struct page *page = mem_map + pfn;
-
-			ClearPageReserved(page);
-			set_page_count(page, 1);
-			__free_page(page);
-			totalhigh_pages++;
-		}
-		totalram_pages += totalhigh_pages;
-	}
-#endif /* CONFIG_HIGHMEM */
-
-        printk("Memory: %luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
-	       (unsigned long)nr_free_pages()<< (PAGE_SHIFT-10),
-	       codepages<< (PAGE_SHIFT-10), datapages<< (PAGE_SHIFT-10),
-	       initpages<< (PAGE_SHIFT-10),
-	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
-
-#ifdef CONFIG_PPC_PMAC
-	if (agp_special_page)
-		printk(KERN_INFO "AGP special page: 0x%08lx\n", agp_special_page);
-#endif
-
-	mem_init_done = 1;
-}
-
-/*
- * Set phys_avail to the amount of physical memory,
- * less the kernel text/data/bss.
- */
-void __init
-set_phys_avail(unsigned long total_memory)
-{
-	unsigned long kstart, ksize;
-
-	/*
-	 * Initially, available physical memory is equivalent to all
-	 * physical memory.
-	 */
-	phys_avail.regions[0].address = PPC_MEMSTART;
-	phys_avail.regions[0].size = total_memory;
-	phys_avail.n_regions = 1;
-
-	/*
-	 * Map out the kernel text/data/bss from the available physical
-	 * memory.
-	 */
-	kstart = __pa(_stext);	/* should be 0 */
-	ksize = PAGE_ALIGN(klimit - _stext);
-
-	mem_pieces_remove(&phys_avail, kstart, ksize, 0);
-	mem_pieces_remove(&phys_avail, 0, 0x4000, 0);
-
-#if defined(CONFIG_BLK_DEV_INITRD)
-	/* Remove the init RAM disk from the available memory. */
-	if (initrd_start) {
-		mem_pieces_remove(&phys_avail, __pa(initrd_start),
-				  initrd_end - initrd_start, 1);
-	}
-#endif /* CONFIG_BLK_DEV_INITRD */
-#ifdef CONFIG_PPC_OF
-	/* remove the RTAS pages from the available memory */
-	if (rtas_data)
-		mem_pieces_remove(&phys_avail, rtas_data, rtas_size, 1);
-#endif
-#ifdef CONFIG_PPC_PMAC
-	/* Because of some uninorth weirdness, we need a page of
-	 * memory as high as possible (it must be outside of the
-	 * bus address seen as the AGP aperture). It will be used
-	 * by the r128 DRM driver
-	 *
-	 * FIXME: We need to make sure that page doesn't overlap any of the
-	 * above. This could be done by improving mem_pieces_find to be able
-	 * to do a backward search from the end of the list.
-	 */
-	if (_machine == _MACH_Pmac && find_devices("uni-north-agp")) {
-		agp_special_page = (total_memory - PAGE_SIZE);
-		mem_pieces_remove(&phys_avail, agp_special_page, PAGE_SIZE, 0);
-		agp_special_page = (unsigned long)__va(agp_special_page);
-	}
-#endif /* CONFIG_PPC_PMAC */
-}
-
-/* Mark some memory as reserved by removing it from phys_avail. */
-void __init reserve_phys_mem(unsigned long start, unsigned long size)
-{
-	mem_pieces_remove(&phys_avail, start, size, 1);
-}
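
The magic "33 * PAGE_SIZE" in the removed loop follows directly from the
comment above it. Worked through with 4kB pages:

	/*
	 * 4GB / 4kB   = 2^20 pages to cover
	 * 2^20 bits   = 2^17 bytes = 128kB of bitmap
	 * 128kB / 4kB = 32 pages, plus 1 for alignment slack = 33 pages
	 */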
@@ -166,77 +166,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
-/*
- * Initialize the bootmem system and give it all the memory we
- * have available.
- */
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-void __init do_init_bootmem(void)
-{
-	unsigned long i;
-	unsigned long start, bootmap_pages;
-	unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
-	int boot_mapsize;
-
-	/*
-	 * Find an area to use for the bootmem bitmap.  Calculate the size of
-	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
-	 * Add 1 additional page in case the address isn't page-aligned.
-	 */
-	bootmap_pages = bootmem_bootmap_pages(total_pages);
-
-	start = lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
-	BUG_ON(!start);
-
-	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
-
-	max_pfn = max_low_pfn;
-
-	/* Add all physical memory to the bootmem map, mark each area
-	 * present.
-	 */
-	for (i=0; i < lmb.memory.cnt; i++)
-		free_bootmem(lmb.memory.region[i].base,
-			     lmb_size_bytes(&lmb.memory, i));
-
-	/* reserve the sections we're already using */
-	for (i=0; i < lmb.reserved.cnt; i++)
-		reserve_bootmem(lmb.reserved.region[i].base,
-				lmb_size_bytes(&lmb.reserved, i));
-
-	for (i=0; i < lmb.memory.cnt; i++)
-		memory_present(0, lmb_start_pfn(&lmb.memory, i),
-			       lmb_end_pfn(&lmb.memory, i));
-}
-
-/*
- * paging_init() sets up the page tables - in fact we've already done this.
- */
-void __init paging_init(void)
-{
-	unsigned long zones_size[MAX_NR_ZONES];
-	unsigned long zholes_size[MAX_NR_ZONES];
-	unsigned long total_ram = lmb_phys_mem_size();
-	unsigned long top_of_ram = lmb_end_of_DRAM();
-
-	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
-	       top_of_ram, total_ram);
-	printk(KERN_INFO "Memory hole size: %ldMB\n",
-	       (top_of_ram - total_ram) >> 20);
-	/*
-	 * All pages are DMA-able so we put them all in the DMA zone.
-	 */
-	memset(zones_size, 0, sizeof(zones_size));
-	memset(zholes_size, 0, sizeof(zholes_size));
-
-	zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
-	zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
-
-	free_area_init_node(0, NODE_DATA(0), zones_size,
-			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
-}
-#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
-
 static struct kcore_list kcore_vmem;
 
 static int __init setup_kcore(void)
@@ -264,61 +193,6 @@ static int __init setup_kcore(void)
 }
 module_init(setup_kcore);
 
-void __init mem_init(void)
-{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-	int nid;
-#endif
-	pg_data_t *pgdat;
-	unsigned long i;
-	struct page *page;
-	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
-
-	num_physpages = max_low_pfn;	/* RAM is assumed contiguous */
-	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
-
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-        for_each_online_node(nid) {
-		if (NODE_DATA(nid)->node_spanned_pages != 0) {
-			printk("freeing bootmem node %x\n", nid);
-			totalram_pages +=
-				free_all_bootmem_node(NODE_DATA(nid));
-		}
-	}
-#else
-	max_mapnr = num_physpages;
-	totalram_pages += free_all_bootmem();
-#endif
-
-	for_each_pgdat(pgdat) {
-		for (i = 0; i < pgdat->node_spanned_pages; i++) {
-			page = pgdat_page_nr(pgdat, i);
-			if (PageReserved(page))
-				reservedpages++;
-		}
-	}
-
-	codesize = (unsigned long)&_etext - (unsigned long)&_stext;
-	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
-	datasize = (unsigned long)&_edata - (unsigned long)&__init_end;
-	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
-
-	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
-	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
-		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
-		num_physpages << (PAGE_SHIFT-10),
-		codesize >> 10,
-		reservedpages << (PAGE_SHIFT-10),
-		datasize >> 10,
-		bsssize >> 10,
-		initsize >> 10);
-
-	mem_init_done = 1;
-
-	/* Initialize the vDSO */
-	vdso_init();
-}
-
 void __iomem * reserve_phb_iospace(unsigned long size)
 {
 	void __iomem *virt_addr;
arch/powerpc/mm/lmb.c (new file, 296 lines)
@@ -0,0 +1,296 @@
+/*
+ * Procedures for maintaining information about logical memory blocks.
+ *
+ * Peter Bergner, IBM Corp.	June 2001.
+ * Copyright (C) 2001 Peter Bergner.
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/types.h>
+#include <asm/page.h>
+#include <asm/prom.h>
+#include <asm/lmb.h>
+#ifdef CONFIG_PPC32
+#include "mmu_decl.h"		/* for __max_low_memory */
+#endif
+
+struct lmb lmb;
+
+#undef DEBUG
+
+void lmb_dump_all(void)
+{
+#ifdef DEBUG
+	unsigned long i;
+
+	udbg_printf("lmb_dump_all:\n");
+	udbg_printf("    memory.cnt		  = 0x%lx\n",
+		    lmb.memory.cnt);
+	udbg_printf("    memory.size		  = 0x%lx\n",
+		    lmb.memory.size);
+	for (i=0; i < lmb.memory.cnt ;i++) {
+		udbg_printf("    memory.region[0x%x].base       = 0x%lx\n",
+			    i, lmb.memory.region[i].base);
+		udbg_printf("		      .size     = 0x%lx\n",
+			    lmb.memory.region[i].size);
+	}
+
+	udbg_printf("\n    reserved.cnt	  = 0x%lx\n",
+		    lmb.reserved.cnt);
+	udbg_printf("    reserved.size	  = 0x%lx\n",
+		    lmb.reserved.size);
+	for (i=0; i < lmb.reserved.cnt ;i++) {
+		udbg_printf("    reserved.region[0x%x].base       = 0x%lx\n",
+			    i, lmb.reserved.region[i].base);
+		udbg_printf("		      .size     = 0x%lx\n",
+			    lmb.reserved.region[i].size);
+	}
+#endif /* DEBUG */
+}
+
+static unsigned long __init lmb_addrs_overlap(unsigned long base1,
+		unsigned long size1, unsigned long base2, unsigned long size2)
+{
+	return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
+}
+
+static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
+		unsigned long base2, unsigned long size2)
+{
+	if (base2 == base1 + size1)
+		return 1;
+	else if (base1 == base2 + size2)
+		return -1;
+
+	return 0;
+}
+
+static long __init lmb_regions_adjacent(struct lmb_region *rgn,
+		unsigned long r1, unsigned long r2)
+{
+	unsigned long base1 = rgn->region[r1].base;
+	unsigned long size1 = rgn->region[r1].size;
+	unsigned long base2 = rgn->region[r2].base;
+	unsigned long size2 = rgn->region[r2].size;
+
+	return lmb_addrs_adjacent(base1, size1, base2, size2);
+}
+
+/* Assumption: base addr of region 1 < base addr of region 2 */
+static void __init lmb_coalesce_regions(struct lmb_region *rgn,
+		unsigned long r1, unsigned long r2)
+{
+	unsigned long i;
+
+	rgn->region[r1].size += rgn->region[r2].size;
+	for (i=r2; i < rgn->cnt-1; i++) {
+		rgn->region[i].base = rgn->region[i+1].base;
+		rgn->region[i].size = rgn->region[i+1].size;
+	}
+	rgn->cnt--;
+}
+
+/* This routine called with relocation disabled. */
+void __init lmb_init(void)
+{
+	/* Create a dummy zero size LMB which will get coalesced away later.
+	 * This simplifies the lmb_add() code below...
+	 */
+	lmb.memory.region[0].base = 0;
+	lmb.memory.region[0].size = 0;
+	lmb.memory.cnt = 1;
+
+	/* Ditto. */
+	lmb.reserved.region[0].base = 0;
+	lmb.reserved.region[0].size = 0;
+	lmb.reserved.cnt = 1;
+}
+
+/* This routine may be called with relocation disabled. */
+void __init lmb_analyze(void)
+{
+	int i;
+
+	lmb.memory.size = 0;
+
+	for (i = 0; i < lmb.memory.cnt; i++)
+		lmb.memory.size += lmb.memory.region[i].size;
+}
+
+/* This routine called with relocation disabled. */
+static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
+				  unsigned long size)
+{
+	unsigned long i, coalesced = 0;
+	long adjacent;
+
+	/* First try and coalesce this LMB with another. */
+	for (i=0; i < rgn->cnt; i++) {
+		unsigned long rgnbase = rgn->region[i].base;
+		unsigned long rgnsize = rgn->region[i].size;
+
+		adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
+		if ( adjacent > 0 ) {
+			rgn->region[i].base -= size;
+			rgn->region[i].size += size;
+			coalesced++;
+			break;
+		}
+		else if ( adjacent < 0 ) {
+			rgn->region[i].size += size;
+			coalesced++;
+			break;
+		}
+	}
+
+	if ((i < rgn->cnt-1) && lmb_regions_adjacent(rgn, i, i+1) ) {
+		lmb_coalesce_regions(rgn, i, i+1);
+		coalesced++;
+	}
+
+	if (coalesced)
+		return coalesced;
+	if (rgn->cnt >= MAX_LMB_REGIONS)
+		return -1;
+
+	/* Couldn't coalesce the LMB, so add it to the sorted table. */
+	for (i = rgn->cnt-1; i >= 0; i--) {
+		if (base < rgn->region[i].base) {
+			rgn->region[i+1].base = rgn->region[i].base;
+			rgn->region[i+1].size = rgn->region[i].size;
+		} else {
+			rgn->region[i+1].base = base;
+			rgn->region[i+1].size = size;
+			break;
+		}
+	}
+	rgn->cnt++;
+
+	return 0;
+}
+
+/* This routine may be called with relocation disabled. */
+long __init lmb_add(unsigned long base, unsigned long size)
+{
+	struct lmb_region *_rgn = &(lmb.memory);
+
+	/* On pSeries LPAR systems, the first LMB is our RMO region. */
+	if (base == 0)
+		lmb.rmo_size = size;
+
+	return lmb_add_region(_rgn, base, size);
+
+}
+
+long __init lmb_reserve(unsigned long base, unsigned long size)
+{
+	struct lmb_region *_rgn = &(lmb.reserved);
+
+	return lmb_add_region(_rgn, base, size);
+}
+
+long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
+				unsigned long size)
+{
+	unsigned long i;
+
+	for (i=0; i < rgn->cnt; i++) {
+		unsigned long rgnbase = rgn->region[i].base;
+		unsigned long rgnsize = rgn->region[i].size;
+		if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
+			break;
+		}
+	}
+
+	return (i < rgn->cnt) ? i : -1;
+}
+
+unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
+{
+	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
+}
+
+unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
+				    unsigned long max_addr)
+{
+	long i, j;
+	unsigned long base = 0;
+
+#ifdef CONFIG_PPC32
+	/* On 32-bit, make sure we allocate lowmem */
+	if (max_addr == LMB_ALLOC_ANYWHERE)
+		max_addr = __max_low_memory;
+#endif
+	for (i = lmb.memory.cnt-1; i >= 0; i--) {
+		unsigned long lmbbase = lmb.memory.region[i].base;
+		unsigned long lmbsize = lmb.memory.region[i].size;
+
+		if (max_addr == LMB_ALLOC_ANYWHERE)
+			base = _ALIGN_DOWN(lmbbase + lmbsize - size, align);
+		else if (lmbbase < max_addr) {
+			base = min(lmbbase + lmbsize, max_addr);
+			base = _ALIGN_DOWN(base - size, align);
+		} else
+			continue;
+
+		while ((lmbbase <= base) &&
+		       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0) )
+			base = _ALIGN_DOWN(lmb.reserved.region[j].base - size,
+					   align);
+
+		if ((base != 0) && (lmbbase <= base))
+			break;
+	}
+
+	if (i < 0)
+		return 0;
+
+	lmb_add_region(&lmb.reserved, base, size);
+
+	return base;
+}
+
+/* You must call lmb_analyze() before this. */
+unsigned long __init lmb_phys_mem_size(void)
+{
+	return lmb.memory.size;
+}
+
+unsigned long __init lmb_end_of_DRAM(void)
+{
+	int idx = lmb.memory.cnt - 1;
+
+	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
+}
+
+/*
+ * Truncate the lmb list to memory_limit if it's set
+ * You must call lmb_analyze() after this.
+ */
+void __init lmb_enforce_memory_limit(unsigned long memory_limit)
+{
+	unsigned long i, limit;
+
+	if (! memory_limit)
+		return;
+
+	limit = memory_limit;
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		if (limit > lmb.memory.region[i].size) {
+			limit -= lmb.memory.region[i].size;
+			continue;
+		}
+
+		lmb.memory.region[i].size = limit;
+		lmb.memory.cnt = i + 1;
+		break;
+	}
+}
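
Taken together, the new file defines the whole lifecycle of the allocator:
build the region lists, then carve reserved pieces out of them top-down. A
hedged sketch of how early boot code would drive it, with invented addresses
and sizes; the call ordering follows the comments in the code above:

	lmb_init();				/* dummy zero-size regions */
	lmb_add(0x0, 0x10000000);		/* 256MB of RAM at 0 */
	lmb_reserve(0x0, 0x4000);		/* keep the exception vectors */
	lmb_analyze();				/* compute lmb.memory.size */

	/* top-down allocation: returns a physical address, 0 on failure */
	unsigned long pa = lmb_alloc(0x8000, 0x1000);

Note that lmb_alloc_base() walks regions from the top of memory downward and
aligns the candidate base down, so allocations cluster at the high end of the
allowed range rather than the low end.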
@@ -45,8 +45,9 @@
 #include <asm/tlb.h>
 #include <asm/bootinfo.h>
 #include <asm/prom.h>
+#include <asm/lmb.h>
+#include <asm/sections.h>
 
-#include "mem_pieces.h"
 #include "mmu_decl.h"
 
 #ifndef CPU_FTR_COHERENT_ICACHE
@@ -54,6 +55,9 @@
 #define CPU_FTR_NOEXECUTE	0
 #endif
 
+int init_bootmem_done;
+int mem_init_done;
+
 /*
  * This is called by /dev/mem to know if a given address has to
  * be mapped non-cacheable or not
@@ -130,6 +134,185 @@ void show_mem(void)
 	printk("%ld pages swap cached\n", cached);
 }
 
+/*
+ * Initialize the bootmem system and give it all the memory we
+ * have available.  If we are using highmem, we only put the
+ * lowmem into the bootmem system.
+ */
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+void __init do_init_bootmem(void)
+{
+	unsigned long i;
+	unsigned long start, bootmap_pages;
+	unsigned long total_pages;
+	int boot_mapsize;
+
+	max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
+#ifdef CONFIG_HIGHMEM
+	total_pages = total_lowmem >> PAGE_SHIFT;
+#endif
+
+	/*
+	 * Find an area to use for the bootmem bitmap.  Calculate the size of
+	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
+	 * Add 1 additional page in case the address isn't page-aligned.
+	 */
+	bootmap_pages = bootmem_bootmap_pages(total_pages);
+
+	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
+	BUG_ON(!start);
+
+	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
+
+	/* Add all physical memory to the bootmem map, mark each area
+	 * present.
+	 */
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		unsigned long base = lmb.memory.region[i].base;
+		unsigned long size = lmb_size_bytes(&lmb.memory, i);
+#ifdef CONFIG_HIGHMEM
+		if (base >= total_lowmem)
+			continue;
+		if (base + size > total_lowmem)
+			size = total_lowmem - base;
+#endif
+		free_bootmem(base, size);
+	}
+
+	/* reserve the sections we're already using */
+	for (i = 0; i < lmb.reserved.cnt; i++)
+		reserve_bootmem(lmb.reserved.region[i].base,
+				lmb_size_bytes(&lmb.reserved, i));
+
+	/* XXX need to clip this if using highmem? */
+	for (i = 0; i < lmb.memory.cnt; i++)
+		memory_present(0, lmb_start_pfn(&lmb.memory, i),
+			       lmb_end_pfn(&lmb.memory, i));
+	init_bootmem_done = 1;
+}
+
+/*
+ * paging_init() sets up the page tables - in fact we've already done this.
+ */
+void __init paging_init(void)
+{
+	unsigned long zones_size[MAX_NR_ZONES];
+	unsigned long zholes_size[MAX_NR_ZONES];
+	unsigned long total_ram = lmb_phys_mem_size();
+	unsigned long top_of_ram = lmb_end_of_DRAM();
+
+#ifdef CONFIG_HIGHMEM
+	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
+	pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
+			(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
+	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
+	kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
+			(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
+	kmap_prot = PAGE_KERNEL;
+#endif /* CONFIG_HIGHMEM */
+
+	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
+	       top_of_ram, total_ram);
+	printk(KERN_INFO "Memory hole size: %ldMB\n",
+	       (top_of_ram - total_ram) >> 20);
+	/*
+	 * All pages are DMA-able so we put them all in the DMA zone.
+	 */
+	memset(zones_size, 0, sizeof(zones_size));
+	memset(zholes_size, 0, sizeof(zholes_size));
+
+	zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
+	zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
+
+#ifdef CONFIG_HIGHMEM
+	zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
+	zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
+	zholes_size[ZONE_HIGHMEM] = (top_of_ram - total_ram) >> PAGE_SHIFT;
+#else
+	zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
+	zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
+#endif /* CONFIG_HIGHMEM */
+
+	free_area_init_node(0, NODE_DATA(0), zones_size,
+			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
+}
+#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
+
+void __init mem_init(void)
+{
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	int nid;
+#endif
+	pg_data_t *pgdat;
+	unsigned long i;
+	struct page *page;
+	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
+
+	num_physpages = max_pfn;	/* RAM is assumed contiguous */
+	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+        for_each_online_node(nid) {
+		if (NODE_DATA(nid)->node_spanned_pages != 0) {
+			printk("freeing bootmem node %x\n", nid);
+			totalram_pages +=
+				free_all_bootmem_node(NODE_DATA(nid));
+		}
+	}
+#else
+	max_mapnr = num_physpages;
+	totalram_pages += free_all_bootmem();
+#endif
+	for_each_pgdat(pgdat) {
+		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+			page = pgdat_page_nr(pgdat, i);
+			if (PageReserved(page))
+				reservedpages++;
+		}
+	}
+
+	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
+	datasize = (unsigned long)&__init_begin - (unsigned long)&_sdata;
+	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
+	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
+
+#ifdef CONFIG_HIGHMEM
+	{
+		unsigned long pfn, highmem_mapnr;
+
+		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
+		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
+			struct page *page = pfn_to_page(pfn);
+
+			ClearPageReserved(page);
+			set_page_count(page, 1);
+			__free_page(page);
+			totalhigh_pages++;
+		}
+		totalram_pages += totalhigh_pages;
+		printk(KERN_INFO "High memory: %luk\n",
+		       totalhigh_pages << (PAGE_SHIFT-10));
+	}
+#endif /* CONFIG_HIGHMEM */
+
+	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
+	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
+		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
+		num_physpages << (PAGE_SHIFT-10),
+		codesize >> 10,
+		reservedpages << (PAGE_SHIFT-10),
+		datasize >> 10,
+		bsssize >> 10,
+		initsize >> 10);
+
+	mem_init_done = 1;
+
+#ifdef CONFIG_PPC64
+	/* Initialize the vDSO */
+	vdso_init();
+#endif
+}
+
 /*
  * This is called when a page has been modified by the kernel.
  * It just marks the page as not i-cache clean.  We do the i-cache
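
The CONFIG_HIGHMEM clipping in the new do_init_bootmem() keeps bootmem
strictly below the lowmem boundary. With example numbers: if total_lowmem is
0x30000000 (768MB) and a region spans base 0x20000000 with size 0x20000000,
then base + size crosses the boundary and the freed span is trimmed:

	/* size = total_lowmem - base = 0x30000000 - 0x20000000 = 0x10000000 */
	free_bootmem(0x20000000, 0x10000000);	/* only the lowmem part */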
@@ -1,163 +0,0 @@
-/*
- *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
- *      Changes to accommodate Power Macintoshes.
- *    Cort Dougan <cort@cs.nmt.edu>
- *      Rewrites.
- *    Grant Erickson <grant@lcse.umn.edu>
- *      General rework and split from mm/init.c.
- *
- *    Module name: mem_pieces.c
- *
- *    Description:
- *      Routines and data structures for manipulating and representing
- *      phyiscal memory extents (i.e. address/length pairs).
- *
- */
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/stddef.h>
-#include <linux/init.h>
-#include <asm/page.h>
-
-#include "mem_pieces.h"
-
-extern struct mem_pieces phys_avail;
-
-static void mem_pieces_print(struct mem_pieces *);
-
-/*
- * Scan a region for a piece of a given size with the required alignment.
- */
-void __init *
-mem_pieces_find(unsigned int size, unsigned int align)
-{
-	int i;
-	unsigned a, e;
-	struct mem_pieces *mp = &phys_avail;
-
-	for (i = 0; i < mp->n_regions; ++i) {
-		a = mp->regions[i].address;
-		e = a + mp->regions[i].size;
-		a = (a + align - 1) & -align;
-		if (a + size <= e) {
-			mem_pieces_remove(mp, a, size, 1);
-			return (void *) __va(a);
-		}
-	}
-	panic("Couldn't find %u bytes at %u alignment\n", size, align);
-
-	return NULL;
-}
-
-/*
- * Remove some memory from an array of pieces
- */
-void __init
-mem_pieces_remove(struct mem_pieces *mp, unsigned int start, unsigned int size,
-		  int must_exist)
-{
-	int i, j;
-	unsigned int end, rs, re;
-	struct reg_property *rp;
-
-	end = start + size;
-	for (i = 0, rp = mp->regions; i < mp->n_regions; ++i, ++rp) {
-		if (end > rp->address && start < rp->address + rp->size)
-			break;
-	}
-	if (i >= mp->n_regions) {
-		if (must_exist)
-			printk("mem_pieces_remove: [%x,%x) not in any region\n",
-			       start, end);
-		return;
-	}
-	for (; i < mp->n_regions && end > rp->address; ++i, ++rp) {
-		rs = rp->address;
-		re = rs + rp->size;
-		if (must_exist && (start < rs || end > re)) {
-			printk("mem_pieces_remove: bad overlap [%x,%x) with",
-			       start, end);
-			mem_pieces_print(mp);
-			must_exist = 0;
-		}
-		if (start > rs) {
-			rp->size = start - rs;
-			if (end < re) {
-				/* need to split this entry */
-				if (mp->n_regions >= MEM_PIECES_MAX)
-					panic("eek... mem_pieces overflow");
-				for (j = mp->n_regions; j > i + 1; --j)
-					mp->regions[j] = mp->regions[j-1];
-				++mp->n_regions;
-				rp[1].address = end;
-				rp[1].size = re - end;
-			}
-		} else {
-			if (end < re) {
-				rp->address = end;
-				rp->size = re - end;
-			} else {
-				/* need to delete this entry */
-				for (j = i; j < mp->n_regions - 1; ++j)
-					mp->regions[j] = mp->regions[j+1];
-				--mp->n_regions;
-				--i;
-				--rp;
-			}
-		}
-	}
-}
-
-static void __init
-mem_pieces_print(struct mem_pieces *mp)
-{
-	int i;
-
-	for (i = 0; i < mp->n_regions; ++i)
-		printk(" [%x, %x)", mp->regions[i].address,
-		       mp->regions[i].address + mp->regions[i].size);
-	printk("\n");
-}
-
-void __init
-mem_pieces_sort(struct mem_pieces *mp)
-{
-	unsigned long a, s;
-	int i, j;
-
-	for (i = 1; i < mp->n_regions; ++i) {
-		a = mp->regions[i].address;
-		s = mp->regions[i].size;
-		for (j = i - 1; j >= 0; --j) {
-			if (a >= mp->regions[j].address)
-				break;
-			mp->regions[j+1] = mp->regions[j];
-		}
-		mp->regions[j+1].address = a;
-		mp->regions[j+1].size = s;
-	}
-}
-
-void __init
-mem_pieces_coalesce(struct mem_pieces *mp)
-{
-	unsigned long a, s, ns;
-	int i, j, d;
-
-	d = 0;
-	for (i = 0; i < mp->n_regions; i = j) {
-		a = mp->regions[i].address;
-		s = mp->regions[i].size;
-		for (j = i + 1; j < mp->n_regions
-			     && mp->regions[j].address - a <= s; ++j) {
-			ns = mp->regions[j].address + mp->regions[j].size - a;
-			if (ns > s)
-				s = ns;
-		}
-		mp->regions[d].address = a;
-		mp->regions[d].size = s;
-		++d;
-	}
-	mp->n_regions = d;
-}
@@ -1,48 +0,0 @@
-/*
- *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
- *      Changes to accommodate Power Macintoshes.
- *    Cort Dougan <cort@cs.nmt.edu>
- *      Rewrites.
- *    Grant Erickson <grant@lcse.umn.edu>
- *      General rework and split from mm/init.c.
- *
- *    Module name: mem_pieces.h
- *
- *    Description:
- *      Routines and data structures for manipulating and representing
- *      phyiscal memory extents (i.e. address/length pairs).
- *
- */
-
-#ifndef __MEM_PIECES_H__
-#define	__MEM_PIECES_H__
-
-#include <asm/prom.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-/* Type Definitions */
-
-#define	MEM_PIECES_MAX	32
-
-struct mem_pieces {
-    int n_regions;
-    struct reg_property regions[MEM_PIECES_MAX];
-};
-
-/* Function Prototypes */
-
-extern void	*mem_pieces_find(unsigned int size, unsigned int align);
-extern void	 mem_pieces_remove(struct mem_pieces *mp, unsigned int start,
-				   unsigned int size, int must_exist);
-extern void	 mem_pieces_coalesce(struct mem_pieces *mp);
-extern void	 mem_pieces_sort(struct mem_pieces *mp);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __MEM_PIECES_H__ */
@@ -36,6 +36,8 @@ extern unsigned long ioremap_base;
 extern unsigned long ioremap_bot;
 extern unsigned int rtas_data, rtas_size;
 
+extern unsigned long __max_low_memory;
+extern unsigned long __initial_memory_limit;
 extern unsigned long total_memory;
 extern unsigned long total_lowmem;
 extern int mem_init_done;
@@ -190,8 +190,7 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
 	 * Don't allow anybody to remap normal RAM that we're using.
 	 * mem_init() sets high_memory so only do the check after that.
 	 */
-	if ( mem_init_done && (p < virt_to_phys(high_memory)) )
-	{
+	if (mem_init_done && (p < virt_to_phys(high_memory))) {
 		printk("__ioremap(): phys addr "PHYS_FMT" is RAM lr %p\n", p,
 		       __builtin_return_address(0));
 		return NULL;
@@ -32,9 +32,9 @@
 #include <asm/prom.h>
 #include <asm/mmu.h>
 #include <asm/machdep.h>
+#include <asm/lmb.h>
 
 #include "mmu_decl.h"
-#include "mem_pieces.h"
 
 PTE *Hash, *Hash_end;
 unsigned long Hash_size, Hash_mask;
@@ -215,17 +215,6 @@ void __init MMU_init_hw(void)
 #define MIN_N_HPTEG	1024		/* min 64kB hash table */
 #endif
 
-#ifdef CONFIG_POWER4
-	/* The hash table has already been allocated and initialized
-	   in prom.c */
-	n_hpteg = Hash_size >> LG_HPTEG_SIZE;
-	lg_n_hpteg = __ilog2(n_hpteg);
-
-	/* Remove the hash table from the available memory */
-	if (Hash)
-		reserve_phys_mem(__pa(Hash), Hash_size);
-
-#else /* CONFIG_POWER4 */
 	/*
 	 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
 	 * This is less than the recommended amount, but then
@@ -245,10 +234,10 @@ void __init MMU_init_hw(void)
 	 * Find some memory for the hash table.
 	 */
 	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
-	Hash = mem_pieces_find(Hash_size, Hash_size);
+	Hash = __va(lmb_alloc_base(Hash_size, Hash_size,
+				   __initial_memory_limit));
 	cacheable_memzero(Hash, Hash_size);
 	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
-#endif /* CONFIG_POWER4 */
 
 	Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
 
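
Passing align == Hash_size gives the table natural alignment, which the
32-bit hashed MMU presumably relies on here: SDR1 encodes the table's
physical base together with a size mask, so the base must sit on a multiple
of the table size. Illustrative values only:

	/* e.g. Hash_size = 64kB: lmb_alloc_base(0x10000, 0x10000, limit)
	 * returns a physical address that is a multiple of 0x10000,
	 * so __pa(Hash) | SDR1_LOW_BITS is well-formed. */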
@@ -34,6 +34,17 @@ typedef unsigned long pte_basic_t;
 #define PTE_FMT		"%.8lx"
 #endif
 
+/* align addr on a size boundary - adjust address up/down if needed */
+#define _ALIGN_UP(addr,size)	(((addr)+((size)-1))&(~((size)-1)))
+#define _ALIGN_DOWN(addr,size)	((addr)&(~((size)-1)))
+
+/* align addr on a size boundary - adjust address up if needed */
+#define _ALIGN(addr,size)     _ALIGN_UP(addr,size)
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr)	_ALIGN(addr, PAGE_SIZE)
+
+
 #undef STRICT_MM_TYPECHECKS
 
 #ifdef STRICT_MM_TYPECHECKS
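
The up/down pair added above behaves as expected on unaligned values; a quick
worked check with size = 0x1000:

	/* _ALIGN_UP(0x1234, 0x1000)   == 0x2000
	 * _ALIGN_DOWN(0x1234, 0x1000) == 0x1000
	 * PAGE_ALIGN(addr) is now _ALIGN_UP(addr, PAGE_SIZE) */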
@@ -76,13 +87,6 @@ typedef unsigned long pgprot_t;
 
 #endif
 
-
-/* align addr on a size boundary - adjust address up if needed -- Cort */
-#define _ALIGN(addr,size)	(((addr)+(size)-1)&(~((size)-1)))
-
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
-
 struct page;
 extern void clear_pages(void *page, int order);
 static inline void clear_page(void *page) { clear_pages(page, 0); }