forked from mirrors/linux
		
	uprobe: use FOLL_SPLIT_PMD instead of FOLL_SPLIT
Use the newly added FOLL_SPLIT_PMD in uprobe. This preserves the huge page when the uprobe is enabled. When the uprobe is disabled, newer instances of the same application could still benefit from huge page. For the next step, we will enable khugepaged to regroup the pmd, so that existing instances of the application could also benefit from huge page after the uprobe is disabled. Link: http://lkml.kernel.org/r/20190815164525.1848545-5-songliubraving@fb.com Signed-off-by: Song Liu <songliubraving@fb.com> Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Reviewed-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com> Reviewed-by: Oleg Nesterov <oleg@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									bfe7b00de6
								
							
						
					
					
						commit
						5a52c9df62
					
				
					 1 changed file with 2 additions and 4 deletions
				
			
		| 
						 | 
					@ -155,7 +155,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	struct mm_struct *mm = vma->vm_mm;
 | 
						struct mm_struct *mm = vma->vm_mm;
 | 
				
			||||||
	struct page_vma_mapped_walk pvmw = {
 | 
						struct page_vma_mapped_walk pvmw = {
 | 
				
			||||||
		.page = old_page,
 | 
							.page = compound_head(old_page),
 | 
				
			||||||
		.vma = vma,
 | 
							.vma = vma,
 | 
				
			||||||
		.address = addr,
 | 
							.address = addr,
 | 
				
			||||||
	};
 | 
						};
 | 
				
			||||||
| 
						 | 
					@ -166,8 +166,6 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 | 
				
			||||||
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
 | 
						mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
 | 
				
			||||||
				addr + PAGE_SIZE);
 | 
									addr + PAGE_SIZE);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	if (new_page) {
 | 
						if (new_page) {
 | 
				
			||||||
		err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
 | 
							err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
 | 
				
			||||||
					    &memcg, false);
 | 
										    &memcg, false);
 | 
				
			||||||
| 
						 | 
					@ -481,7 +479,7 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 | 
				
			||||||
retry:
 | 
					retry:
 | 
				
			||||||
	/* Read the page with vaddr into memory */
 | 
						/* Read the page with vaddr into memory */
 | 
				
			||||||
	ret = get_user_pages_remote(NULL, mm, vaddr, 1,
 | 
						ret = get_user_pages_remote(NULL, mm, vaddr, 1,
 | 
				
			||||||
			FOLL_FORCE | FOLL_SPLIT, &old_page, &vma, NULL);
 | 
								FOLL_FORCE | FOLL_SPLIT_PMD, &old_page, &vma, NULL);
 | 
				
			||||||
	if (ret <= 0)
 | 
						if (ret <= 0)
 | 
				
			||||||
		return ret;
 | 
							return ret;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue