Mirror of https://github.com/torvalds/linux.git, synced 2025-11-04 10:40:15 +02:00
mm: memory: improve copy_user_large_folio()

Use nr_pages instead of pages_per_huge_page and move the address alignment
from copy_user_large_folio() into the callers, since it is only needed when
we don't know which address will be accessed.

Link: https://lkml.kernel.org/r/20240618091242.2140164-4-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
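Aside, for illustration only (not part of the commit): a minimal userspace sketch of what "moving the alignment into the callers" means. The old helper masked the hint down to the folio boundary itself; callers now pass an already ALIGN_DOWN()ed address when they have no better hint. PAGE_SHIFT, the folio size and the sample address below are made-up demo values, and ALIGN_DOWN is redefined locally to mirror the kernel macro.

/* Demo only: the caller-side ALIGN_DOWN() equals the old in-helper mask. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12				/* assume 4K base pages for the demo */
#define ALIGN_DOWN(x, a)	((x) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long nr_pages = 512;			/* e.g. a 2M hugetlb folio */
	unsigned long sz = nr_pages << PAGE_SHIFT;	/* stands in for huge_page_size(h) */
	unsigned long addr = 0x7f1234567890UL;		/* arbitrary address inside the mapping */

	/* What copy_user_large_folio() used to compute internally ... */
	unsigned long old_way = addr & ~(((unsigned long)nr_pages << PAGE_SHIFT) - 1);
	/* ... and what callers such as copy_hugetlb_page_range() now pass in. */
	unsigned long new_way = ALIGN_DOWN(addr, sz);

	assert(old_way == new_way);
	printf("aligned base: %#lx\n", new_way);
	return 0;
}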
This commit is contained in:
parent 5132633ee7
commit 530dd9926d

2 changed files with 12 additions and 17 deletions
mm/hugetlb.c | 18
@@ -5492,9 +5492,8 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 					ret = PTR_ERR(new_folio);
 					break;
 				}
-				ret = copy_user_large_folio(new_folio,
-							    pte_folio,
-							    addr, dst_vma);
+				ret = copy_user_large_folio(new_folio, pte_folio,
+						ALIGN_DOWN(addr, sz), dst_vma);
 				folio_put(pte_folio);
 				if (ret) {
 					folio_put(new_folio);
@@ -6684,7 +6683,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 	struct hstate *h = hstate_vma(dst_vma);
 	struct address_space *mapping = dst_vma->vm_file->f_mapping;
 	pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
-	unsigned long size;
+	unsigned long size = huge_page_size(h);
 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
 	pte_t _dst_pte;
 	spinlock_t *ptl;
@@ -6703,8 +6702,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 		}
 
 		_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
-		set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte,
-				huge_page_size(h));
+		set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
 
 		/* No need to invalidate - it was non-present before */
 		update_mmu_cache(dst_vma, dst_addr, dst_pte);
@@ -6778,7 +6776,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 			*foliop = NULL;
 			goto out;
 		}
-		ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
+		ret = copy_user_large_folio(folio, *foliop,
+					    ALIGN_DOWN(dst_addr, size), dst_vma);
 		folio_put(*foliop);
 		*foliop = NULL;
 		if (ret) {
@@ -6805,9 +6804,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 
 	/* Add shared, newly allocated pages to the page cache. */
 	if (vm_shared && !is_continue) {
-		size = i_size_read(mapping->host) >> huge_page_shift(h);
 		ret = -EFAULT;
-		if (idx >= size)
+		if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h)))
 			goto out_release_nounlock;
 
 		/*
@@ -6864,7 +6862,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 	if (wp_enabled)
 		_dst_pte = huge_pte_mkuffd_wp(_dst_pte);
 
-	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, huge_page_size(h));
+	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
 
 	hugetlb_count_add(pages_per_huge_page(h), dst_mm);
 
mm/memory.c | 11
@@ -6521,20 +6521,17 @@ static int copy_subpage(unsigned long addr, int idx, void *arg)
 int copy_user_large_folio(struct folio *dst, struct folio *src,
 			  unsigned long addr_hint, struct vm_area_struct *vma)
 {
-	unsigned int pages_per_huge_page = folio_nr_pages(dst);
-	unsigned long addr = addr_hint &
-		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
+	unsigned int nr_pages = folio_nr_pages(dst);
 	struct copy_subpage_arg arg = {
 		.dst = dst,
 		.src = src,
 		.vma = vma,
 	};
 
-	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES))
-		return copy_user_gigantic_page(dst, src, addr, vma,
-					       pages_per_huge_page);
+	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
+		return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages);
 
-	return process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
+	return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg);
 }
 
 long copy_folio_from_user(struct folio *dst_folio,
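For context, and not part of the commit: copy_user_large_folio() keeps forwarding the unaligned addr_hint because process_huge_page() uses it to derive both the aligned base and the subpage containing the faulting address, handling that subpage last so it stays cache-hot. A rough userspace sketch of that idea follows; nr_pages, the sample address and process_subpage() are stand-ins, not kernel code.

/* Sketch only: recover base and target subpage from an unaligned hint. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static void process_subpage(unsigned long addr, unsigned long idx)
{
	printf("copy subpage %2lu at %#lx\n", idx, addr);	/* stand-in for copy_subpage() */
}

int main(void)
{
	unsigned long nr_pages = 8;			/* tiny "folio" for the demo */
	unsigned long addr_hint = 0x7f0000005000UL;	/* assumed faulting address */
	unsigned long base = addr_hint & ~((nr_pages << PAGE_SHIFT) - 1);
	unsigned long target = (addr_hint - base) >> PAGE_SHIFT;
	unsigned long i;

	/* Copy every subpage except the one containing the fault first ... */
	for (i = 0; i < nr_pages; i++)
		if (i != target)
			process_subpage(base + i * PAGE_SIZE, i);
	/* ... then the target last, so it is the most recently touched. */
	process_subpage(base + target * PAGE_SIZE, target);
	return 0;
}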