mm: don't assume anonymous pages have SwapBacked flag

There are a few places where the code assumes anonymous pages should have the SwapBacked flag set. MADV_FREE pages are anonymous pages, but we are going to add them to the LRU_INACTIVE_FILE list and clear the SwapBacked flag for them. The assumption no longer holds, so fix the affected checks.

Link: http://lkml.kernel.org/r/3945232c0df3dd6c4ef001976f35a95f18dcb407.1487965799.git.shli@fb.com
Signed-off-by: Shaohua Li <shli@fb.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:

    parent a128ca71fb
    commit d44d363f65

4 changed files with 7 additions and 8 deletions
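For context (not part of the commit): MADV_FREE, available since Linux 4.5, is the madvise(2) hint this series builds on. The kernel may reclaim a lazily freed anonymous page without writing it to swap, unless the application dirties it again first. A minimal userspace sketch of that semantic:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16 * 4096;

	/* Anonymous private mapping: the pages are swap backed by default. */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memset(buf, 0xaa, len);		/* dirty the pages */

	/*
	 * Lazy free: the kernel may now discard these pages under memory
	 * pressure instead of swapping them out. The mapping stays valid.
	 */
	if (madvise(buf, len, MADV_FREE) != 0) {
		perror("madvise(MADV_FREE)");
		return 1;
	}

	/* Writing again cancels the hint for the touched page. */
	buf[0] = 1;

	munmap(buf, len);
	return 0;
}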
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2399,7 +2399,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 
 	VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
 
 	if (PageAnon(head)) {
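With lazy freeing in the picture, an anonymous THP reaching split_huge_page_to_list() may legitimately have PageSwapBacked clear, so the !PageSwapBacked assertion would fire on a valid page and is dropped.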
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -483,8 +483,7 @@ void __khugepaged_exit(struct mm_struct *mm)
 
 static void release_pte_page(struct page *page)
 {
-	/* 0 stands for page_is_file_cache(page) == false */
-	dec_node_page_state(page, NR_ISOLATED_ANON + 0);
+	dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
 	unlock_page(page);
 	putback_lru_page(page);
 }
@@ -532,7 +531,6 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 
 		VM_BUG_ON_PAGE(PageCompound(page), page);
 		VM_BUG_ON_PAGE(!PageAnon(page), page);
-		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
 		/*
 		 * We can do it before isolate_lru_page because the
@@ -579,8 +577,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 			result = SCAN_DEL_PAGE_LRU;
 			goto out;
 		}
-		/* 0 stands for page_is_file_cache(page) == false */
-		inc_node_page_state(page, NR_ISOLATED_ANON + 0);
+		inc_node_page_state(page,
+				NR_ISOLATED_ANON + page_is_file_cache(page));
 		VM_BUG_ON_PAGE(!PageLocked(page), page);
 		VM_BUG_ON_PAGE(PageLRU(page), page);
 
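The hard-coded "+ 0" assumed the page could never be file-backed for accounting purposes. The replacement works because NR_ISOLATED_ANON and NR_ISOLATED_FILE are adjacent counters, and page_is_file_cache() keys off the SwapBacked flag, so an MADV_FREE page (anonymous but not swap backed) is now accounted as isolated file. At the time of this series the helper in include/linux/mm_inline.h read roughly:

static inline int page_is_file_cache(struct page *page)
{
	return !PageSwapBacked(page);
}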
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1944,7 +1944,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 
 	/* Prepare a page as a migration target */
 	__SetPageLocked(new_page);
-	__SetPageSwapBacked(new_page);
+	if (PageSwapBacked(page))
+		__SetPageSwapBacked(new_page);
 
 	/* anon mapping, we can simply copy page->mapping to the new page: */
 	new_page->mapping = page->mapping;
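Here the migration target inherits the SwapBacked state of the source page instead of unconditionally setting it, so a lazily freed page remains lazily freeable after migration.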
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1424,7 +1424,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			 * Store the swap location in the pte.
 			 * See handle_pte_fault() ...
 			 */
-			VM_BUG_ON_PAGE(!PageSwapCache(page), page);
+			VM_BUG_ON_PAGE(!PageSwapCache(page) && PageSwapBacked(page),
+				page);
 
 			if (!PageDirty(page)) {
 				/* It's a freeable page by MADV_FREE */
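The unmap path previously insisted that every anonymous page reaching this point be in the swap cache. The relaxed assertion only fires when a page still claims to be swap backed (and thus genuinely needs a swap slot) yet is not in the swap cache; a clean MADV_FREE page, anonymous but not swap backed, now passes.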