	mm/hwpoison: disable pcp for page_handle_poison()
The recent patch "mm/page_alloc: allow high-order pages to be stored on the per-cpu lists" makes the kernel decide whether a page order may use the pcp lists via pcp_allowed_order(), which breaks soft-offline for hugetlb pages.

Soft-offline dissolves a migration source page and then removes it from the buddy free list, so it assumes that every subpage of the soft-offlined hugepage is recognized as a buddy page immediately after dissolve_free_huge_page() returns. Since pcp_allowed_order() returns true for hugetlb orders, this assumption no longer holds.

So disable pcp during dissolve_free_huge_page() and take_page_off_buddy() to prevent soft-offlined hugepages from being linked onto pcp lists. Soft-offline is not a common event, so the impact on performance should be minimal. And because the optimization in Mel's patch could benefit hugetlb as well, zone_pcp_disable() is called only in the hwpoison context.

Link: https://lkml.kernel.org/r/20210617092626.291006-1-nao.horiguchi@gmail.com
Signed-off-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
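For context, pcp_allowed_order() is the check that Mel's patch added to mm/page_alloc.c to decide which orders may use the per-cpu lists. A rough sketch of its shape, reconstructed from the v5.14 sources (see mm/page_alloc.c in the tree for the authoritative version):

/*
 * Sketch (not part of this patch): orders up to PAGE_ALLOC_COSTLY_ORDER
 * use the pcp lists, and so does pageblock_order, which matches the
 * default hugetlb page order on x86 -- hence freed hugepage subpages
 * may now land on pcp lists instead of the buddy free list.
 */
static inline bool pcp_allowed_order(unsigned int order)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order == pageblock_order)
		return true;
#endif
	return false;
}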
parent 7118fc2906
commit 510d25c92e

1 changed file with 16 additions and 3 deletions
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -66,6 +66,19 @@ int sysctl_memory_failure_recovery __read_mostly = 1;
 
 atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
 
+static bool __page_handle_poison(struct page *page)
+{
+	bool ret;
+
+	zone_pcp_disable(page_zone(page));
+	ret = dissolve_free_huge_page(page);
+	if (!ret)
+		ret = take_page_off_buddy(page);
+	zone_pcp_enable(page_zone(page));
+
+	return ret;
+}
+
 static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
 {
 	if (hugepage_or_freepage) {
@@ -73,7 +86,7 @@ static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, boo
 		 * Doing this check for free pages is also fine since dissolve_free_huge_page
 		 * returns 0 for non-hugetlb pages as well.
 		 */
-		if (dissolve_free_huge_page(page) || !take_page_off_buddy(page))
+		if (!__page_handle_poison(page))
 			/*
 			 * We could fail to take off the target page from buddy
 			 * for example due to racy page allocation, but that's
@@ -985,7 +998,7 @@ static int me_huge_page(struct page *p, unsigned long pfn)
 		 */
 		if (PageAnon(hpage))
 			put_page(hpage);
-		if (!dissolve_free_huge_page(p) && take_page_off_buddy(p)) {
+		if (__page_handle_poison(p)) {
 			page_ref_inc(p);
 			res = MF_RECOVERED;
 		}
@@ -1446,7 +1459,7 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
 			}
 			unlock_page(head);
 			res = MF_FAILED;
-			if (!dissolve_free_huge_page(p) && take_page_off_buddy(p)) {
+			if (__page_handle_poison(p)) {
 				page_ref_inc(p);
 				res = MF_RECOVERED;
 			}
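The soft-offline path exercised by this fix can be driven from userspace. Below is a minimal test sketch (not part of the patch), assuming preallocated 2MB hugepages, CAP_SYS_ADMIN, and a kernel built with CONFIG_MEMORY_FAILURE; it maps a hugetlb page and soft-offlines it with madvise(MADV_SOFT_OFFLINE), which migrates the contents away and poisons the source hugepage.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_SOFT_OFFLINE
#define MADV_SOFT_OFFLINE 101	/* from <asm-generic/mman-common.h> */
#endif

int main(void)
{
	size_t len = 2UL << 20;	/* one 2MB hugetlb page (x86_64 default size) */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");	/* needs hugepages reserved, e.g. via vm.nr_hugepages */
		return EXIT_FAILURE;
	}
	memset(p, 0, len);	/* touch the mapping so a hugepage is actually allocated */

	/*
	 * Migrate the contents away and poison the source hugepage. With
	 * this patch the dissolved subpages go to the buddy free list
	 * (pcp disabled), so take_page_off_buddy() can isolate them.
	 */
	if (madvise(p, len, MADV_SOFT_OFFLINE))
		perror("madvise(MADV_SOFT_OFFLINE)");

	munmap(p, len);
	return 0;
}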