cma: fix counting of isolated pages

Isolated free pages shouldn't be accounted in the NR_FREE_PAGES counter.
Fix it by properly decreasing/increasing the NR_FREE_PAGES counter in
set_migratetype_isolate()/unset_migratetype_isolate() and by removing
the counter adjustment for isolated pages from free_one_page() and
capture_free_page().

Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
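The rule the patch enforces: free pages leave NR_FREE_PAGES when their block is isolated, stay out of the counter while isolated (even as pages are freed back into the buddy allocator), and re-enter it when the block is unisolated. Below is a minimal userspace sketch of that accounting rule; the toy_* names are illustrative stand-ins, not kernel API:

/*
 * Toy model of the NR_FREE_PAGES accounting rule this patch enforces.
 * toy_* names are invented for illustration; only the rule is real.
 */
#include <stdio.h>

enum migratetype { MIGRATE_MOVABLE, MIGRATE_ISOLATE };

struct toy_zone {
	long nr_free_pages;	/* models the NR_FREE_PAGES vmstat counter */
};

/* Freeing 2^order pages: isolated pages stay invisible to the counter. */
static void toy_free_one_page(struct toy_zone *zone, int order,
			      enum migratetype mt)
{
	if (mt != MIGRATE_ISOLATE)
		zone->nr_free_pages += 1L << order;
}

/* Isolating a block: pages moved to MIGRATE_ISOLATE leave the counter. */
static void toy_set_migratetype_isolate(struct toy_zone *zone, long nr_pages)
{
	zone->nr_free_pages -= nr_pages;
}

int main(void)
{
	struct toy_zone zone = { .nr_free_pages = 1024 };

	toy_set_migratetype_isolate(&zone, 512);	/* counter: 512 */
	toy_free_one_page(&zone, 0, MIGRATE_ISOLATE);	/* no change: 512 */
	toy_free_one_page(&zone, 0, MIGRATE_MOVABLE);	/* counter: 513 */
	printf("NR_FREE_PAGES = %ld\n", zone.nr_free_pages);
	return 0;
}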
parent 770c8aaaf6
commit 2139cbe627

2 changed files with 16 additions and 5 deletions
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -691,7 +691,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
 	zone->pages_scanned = 0;
 
 	__free_one_page(page, zone, order, migratetype);
-	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+	if (unlikely(migratetype != MIGRATE_ISOLATE))
+		__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 	spin_unlock(&zone->lock);
 }
 
@@ -1392,6 +1393,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 	unsigned int order;
 	unsigned long watermark;
 	struct zone *zone;
+	int mt;
 
 	BUG_ON(!PageBuddy(page));
 
@@ -1407,7 +1409,10 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 	list_del(&page->lru);
 	zone->free_area[order].nr_free--;
 	rmv_page_order(page);
-	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
+
+	mt = get_pageblock_migratetype(page);
+	if (unlikely(mt != MIGRATE_ISOLATE))
+		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
 
 	if (alloc_order != order)
 		expand(zone, page, alloc_order, order,
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -76,8 +76,12 @@ int set_migratetype_isolate(struct page *page)
 
 out:
 	if (!ret) {
+		unsigned long nr_pages;
+
 		set_pageblock_isolate(page);
-		move_freepages_block(zone, page, MIGRATE_ISOLATE);
+		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
+
+		__mod_zone_page_state(zone, NR_FREE_PAGES, -nr_pages);
 	}
 
 	spin_unlock_irqrestore(&zone->lock, flags);
@@ -89,12 +93,14 @@ int set_migratetype_isolate(struct page *page)
 void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 {
 	struct zone *zone;
-	unsigned long flags;
+	unsigned long flags, nr_pages;
 
 	zone = page_zone(page);
 	spin_lock_irqsave(&zone->lock, flags);
 	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
 		goto out;
-	move_freepages_block(zone, page, migratetype);
+	nr_pages = move_freepages_block(zone, page, migratetype);
+	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
 	restore_pageblock_isolate(page, migratetype);
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
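A quick sanity check on the mm/page_isolation.c side: after this change, an isolate/unisolate round trip should leave NR_FREE_PAGES unchanged, because unset_migratetype_isolate() adds back exactly the count that move_freepages_block() reports. A hedged model of that round trip; the model_* names are invented for illustration:

/* Toy check that isolate + unisolate leave NR_FREE_PAGES unchanged. */
#include <assert.h>

struct model_zone {
	long nr_free_pages;	/* models the NR_FREE_PAGES vmstat counter */
};

/* Models set_migratetype_isolate(): subtract what was moved. */
static long model_isolate(struct model_zone *z, long moved)
{
	z->nr_free_pages -= moved;
	return moved;		/* move_freepages_block() return value */
}

/* Models unset_migratetype_isolate(): add back what was moved. */
static void model_unisolate(struct model_zone *z, long moved)
{
	z->nr_free_pages += moved;
}

int main(void)
{
	struct model_zone z = { .nr_free_pages = 2048 };
	long moved = model_isolate(&z, 512);

	model_unisolate(&z, moved);
	assert(z.nr_free_pages == 2048);	/* round trip is neutral */
	return 0;
}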