	mm/nommu: use alloc_pages_exact() rather than its own implementation
do_mmap_private() in nommu.c tries to allocate physically contiguous pages of arbitrary size in some cases, and we now have a good abstraction that does exactly the same thing, alloc_pages_exact().  So change it to use that.  There is no functional change.  This is a preparation step for accurately supporting the page owner feature.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Dave Hansen <dave@sr71.net>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Jungsoo Son <jungsoo.son@lge.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
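For context, the API being adopted pairs an exact-size allocation with an exact-size free.  Below is a minimal sketch of that usage pattern (the helper names alloc_exact_buf()/free_exact_buf() are hypothetical, not part of this commit): alloc_pages_exact() returns PAGE_ALIGN(len) bytes of physically contiguous memory, so the caller never has to trim the tail of a power-of-2 block by hand.

#include <linux/gfp.h>

/* Hypothetical wrapper, for illustration only. */
static void *alloc_exact_buf(size_t len)
{
	/* Exactly PAGE_ALIGN(len) bytes, physically contiguous, or NULL. */
	return alloc_pages_exact(len, GFP_KERNEL);
}

static void free_exact_buf(void *base, size_t len)
{
	/* free_pages_exact() must be passed the same length as the alloc. */
	free_pages_exact(base, len);
}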
commit dbc8358c72
parent 031bc5743f

1 changed file with 11 additions and 22 deletions
 mm/nommu.c | 33 +++++++++++----------------------
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1149,8 +1149,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
 			   unsigned long len,
 			   unsigned long capabilities)
 {
-	struct page *pages;
-	unsigned long total, point, n;
+	unsigned long total, point;
 	void *base;
 	int ret, order;
 
@@ -1182,33 +1181,23 @@ static int do_mmap_private(struct vm_area_struct *vma,
 	order = get_order(len);
 	kdebug("alloc order %d for %lx", order, len);
 
-	pages = alloc_pages(GFP_KERNEL, order);
-	if (!pages)
-		goto enomem;
-
 	total = 1 << order;
-	atomic_long_add(total, &mmap_pages_allocated);
-
 	point = len >> PAGE_SHIFT;
 
-	/* we allocated a power-of-2 sized page set, so we may want to trim off
-	 * the excess */
+	/* we don't want to allocate a power-of-2 sized page set */
 	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
-		while (total > point) {
-			order = ilog2(total - point);
-			n = 1 << order;
-			kdebug("shave %lu/%lu @%lu", n, total - point, total);
-			atomic_long_sub(n, &mmap_pages_allocated);
-			total -= n;
-			set_page_refcounted(pages + total);
-			__free_pages(pages + total, order);
-		}
+		total = point;
+		kdebug("try to alloc exact %lu pages", total);
+		base = alloc_pages_exact(len, GFP_KERNEL);
+	} else {
+		base = (void *)__get_free_pages(GFP_KERNEL, order);
 	}
 
-	for (point = 1; point < total; point++)
-		set_page_refcounted(&pages[point]);
+	if (!base)
+		goto enomem;
+
+	atomic_long_add(total, &mmap_pages_allocated);
 
-	base = page_address(pages);
 	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
 	region->vm_start = (unsigned long) base;
 	region->vm_end   = region->vm_start + len;
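It is worth noting why the deleted trimming loop is safe to drop: alloc_pages_exact() performs essentially the same work inside the page allocator, i.e. grab a power-of-2 block, split it into individually freeable pages, and hand the unneeded tail back.  A simplified illustration of that idea follows (this is a sketch, not the actual mm/page_alloc.c code):

#include <linux/gfp.h>
#include <linux/mm.h>

static void *alloc_pages_exact_sketch(size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	unsigned long addr = __get_free_pages(gfp_mask, order);

	if (addr) {
		unsigned long used = addr + PAGE_ALIGN(size);
		unsigned long end  = addr + (PAGE_SIZE << order);

		/* Make each page of the order-N block independently freeable... */
		split_page(virt_to_page((void *)addr), order);

		/* ...then return the excess tail pages to the allocator. */
		while (used < end) {
			free_page(used);
			used += PAGE_SIZE;
		}
	}
	return (void *)addr;
}

With the trimming centralized there, the nommu side only has to account for the exact number of pages it keeps, which is why the diff collapses the mmap_pages_allocated bookkeeping into a single atomic_long_add().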