mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	mm: fix usemap initialization
usemap must be initialized only when pfn is within the zone. If not, it corrupts memory. This patch also reduces the number of calls to set_pageblock_migratetype() by changing the condition from (pfn & (pageblock_nr_pages - 1)) to !(pfn & (pageblock_nr_pages - 1)); it should be called once per pageblock. Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Acked-by: Mel Gorman <mel@csn.ul.ie> Cc: Hugh Dickins <hugh@veritas.com> Cc: Shi Weihua <shiwh@cn.fujitsu.com> Cc: Balbir Singh <balbir@in.ibm.com> Cc: Pavel Emelyanov <xemul@openvz.org> Cc: <stable@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									a01e035ebb
								
							
						
					
					
						commit
						86051ca5ea
					
				
					 1 changed file with 12 additions and 2 deletions
				
			
		| 
						 | 
				
			
			@ -2524,7 +2524,9 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 | 
			
		|||
	struct page *page;
 | 
			
		||||
	unsigned long end_pfn = start_pfn + size;
 | 
			
		||||
	unsigned long pfn;
 | 
			
		||||
	struct zone *z;
 | 
			
		||||
 | 
			
		||||
	z = &NODE_DATA(nid)->node_zones[zone];
 | 
			
		||||
	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 | 
			
		||||
		/*
 | 
			
		||||
		 * There can be holes in boot-time mem_map[]s
 | 
			
		||||
| 
						 | 
				
			
			@ -2542,7 +2544,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 | 
			
		|||
		init_page_count(page);
 | 
			
		||||
		reset_page_mapcount(page);
 | 
			
		||||
		SetPageReserved(page);
 | 
			
		||||
 | 
			
		||||
		/*
 | 
			
		||||
		 * Mark the block movable so that blocks are reserved for
 | 
			
		||||
		 * movable at startup. This will force kernel allocations
 | 
			
		||||
| 
						 | 
				
			
			@ -2551,8 +2552,15 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 | 
			
		|||
		 * kernel allocations are made. Later some blocks near
 | 
			
		||||
		 * the start are marked MIGRATE_RESERVE by
 | 
			
		||||
		 * setup_zone_migrate_reserve()
 | 
			
		||||
		 *
 | 
			
		||||
		 * bitmap is created for zone's valid pfn range. but memmap
 | 
			
		||||
		 * can be created for invalid pages (for alignment)
 | 
			
		||||
		 * check here not to call set_pageblock_migratetype() against
 | 
			
		||||
		 * pfn out of zone.
 | 
			
		||||
		 */
 | 
			
		||||
		if ((pfn & (pageblock_nr_pages-1)))
 | 
			
		||||
		if ((z->zone_start_pfn <= pfn)
 | 
			
		||||
		    && (pfn < z->zone_start_pfn + z->spanned_pages)
 | 
			
		||||
		    && !(pfn & (pageblock_nr_pages - 1)))
 | 
			
		||||
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 | 
			
		||||
 | 
			
		||||
		INIT_LIST_HEAD(&page->lru);
 | 
			
		||||
| 
						 | 
				
			
			@ -4464,6 +4472,8 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
 | 
			
		|||
	pfn = page_to_pfn(page);
 | 
			
		||||
	bitmap = get_pageblock_bitmap(zone, pfn);
 | 
			
		||||
	bitidx = pfn_to_bitidx(zone, pfn);
 | 
			
		||||
	VM_BUG_ON(pfn < zone->zone_start_pfn);
 | 
			
		||||
	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
 | 
			
		||||
 | 
			
		||||
	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
 | 
			
		||||
		if (flags & value)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue