mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 02:30:34 +02:00 
			
		
		
		
	mm, thp: do not cause memcg oom for thp
Commit 2516035499 ("mm, thp: remove __GFP_NORETRY from khugepaged and madvised allocations") changed the page allocator to no longer detect thp allocations based on __GFP_NORETRY. It did not, however, modify the mem cgroup try_charge() path to avoid oom kill for either khugepaged collapsing or thp faulting. It is never expected to oom kill a process to allocate a hugepage for thp; reclaim is governed by the thp defrag mode and MADV_HUGEPAGE, but allocations (and charging) should fallback instead of oom killing processes. Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1803191409420.124411@chino.kir.corp.google.com Fixes: 2516035499 ("mm, thp: remove __GFP_NORETRY from khugepaged and madvised allocations") Signed-off-by: David Rientjes <rientjes@google.com> Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Johannes Weiner <hannes@cmpxchg.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									1c610d5f93
								
							
						
					
					
						commit
						9d3c3354bb
					
				
					 2 changed files with 9 additions and 4 deletions
				
			
		| 
						 | 
				
			
			@ -555,7 +555,8 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
 | 
			
		|||
 | 
			
		||||
	VM_BUG_ON_PAGE(!PageCompound(page), page);
 | 
			
		||||
 | 
			
		||||
	if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) {
 | 
			
		||||
	if (mem_cgroup_try_charge(page, vma->vm_mm, gfp | __GFP_NORETRY, &memcg,
 | 
			
		||||
				  true)) {
 | 
			
		||||
		put_page(page);
 | 
			
		||||
		count_vm_event(THP_FAULT_FALLBACK);
 | 
			
		||||
		return VM_FAULT_FALLBACK;
 | 
			
		||||
| 
						 | 
				
			
			@ -1316,7 +1317,7 @@ int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
 | 
			
		|||
	}
 | 
			
		||||
 | 
			
		||||
	if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm,
 | 
			
		||||
					huge_gfp, &memcg, true))) {
 | 
			
		||||
				huge_gfp | __GFP_NORETRY, &memcg, true))) {
 | 
			
		||||
		put_page(new_page);
 | 
			
		||||
		split_huge_pmd(vma, vmf->pmd, vmf->address);
 | 
			
		||||
		if (page)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -965,7 +965,9 @@ static void collapse_huge_page(struct mm_struct *mm,
 | 
			
		|||
		goto out_nolock;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
 | 
			
		||||
	/* Do not oom kill for khugepaged charges */
 | 
			
		||||
	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
 | 
			
		||||
					   &memcg, true))) {
 | 
			
		||||
		result = SCAN_CGROUP_CHARGE_FAIL;
 | 
			
		||||
		goto out_nolock;
 | 
			
		||||
	}
 | 
			
		||||
| 
						 | 
				
			
			@ -1324,7 +1326,9 @@ static void collapse_shmem(struct mm_struct *mm,
 | 
			
		|||
		goto out;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
 | 
			
		||||
	/* Do not oom kill for khugepaged charges */
 | 
			
		||||
	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
 | 
			
		||||
					   &memcg, true))) {
 | 
			
		||||
		result = SCAN_CGROUP_CHARGE_FAIL;
 | 
			
		||||
		goto out;
 | 
			
		||||
	}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue