mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 02:30:34 +02:00)
	mm: cleanup the gfp_mask handling in __vmalloc_area_node
Patch series "two small vmalloc cleanups".

This patch (of 2):

__vmalloc_area_node currently has four different gfp_t variables to
just express this simple logic:

 - use the passed in mask, plus __GFP_NOWARN and __GFP_HIGHMEM (if
   suitable) for the underlying page allocation
 - use just the reclaim flags from the passed in mask plus __GFP_ZERO
   for allocating the page array

Simplify this down to just use the pre-existing nested_gfp as-is for
the page array allocation, and just the passed in gfp_mask for the
page allocation, after conditionally ORing __GFP_HIGHMEM into it.
This also makes the allocation warning a little more correct.

Also initialize two variables at the time of declaration while
touching this area.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Link: https://lkml.kernel.org/r/20201002124035.1539300-1-hch@lst.de
Link: https://lkml.kernel.org/r/20201002124035.1539300-2-hch@lst.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
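To make the described simplification concrete, here is a small stand-alone
C sketch of the mask handling before and after the patch. The gfp_t typedef
and the GFP_*/__GFP_* values below are illustrative stubs so the logic
compiles in userspace (the real definitions live in include/linux/gfp.h),
and masks_before()/masks_after() are hypothetical helper names invented for
this comparison, not kernel functions:

/*
 * Illustrative sketch only (not kernel code): gfp_t and the flag values
 * below are stand-in stubs; masks_before()/masks_after() are hypothetical
 * helpers invented for this comparison.
 */
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_ZERO		0x001u	/* stub value */
#define __GFP_NOWARN		0x002u	/* stub value */
#define __GFP_HIGHMEM		0x004u	/* stub value */
#define GFP_DMA			0x008u	/* stub value */
#define GFP_DMA32		0x010u	/* stub value */
#define GFP_RECLAIM_MASK	0x0e0u	/* stub for the reclaim-related bits */

/* Before the patch: four gfp_t variables expressed the two masks. */
static void masks_before(gfp_t gfp_mask, gfp_t *array_gfp, gfp_t *page_gfp)
{
	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
	const gfp_t highmem_mask =
		(gfp_mask & (GFP_DMA | GFP_DMA32)) ? 0 : __GFP_HIGHMEM;

	*array_gfp = nested_gfp | highmem_mask;	/* vmalloc'ed page array */
	*page_gfp = alloc_mask | highmem_mask;	/* per-page allocations */
}

/* After the patch: nested_gfp as-is, __GFP_HIGHMEM ORed into gfp_mask. */
static void masks_after(gfp_t gfp_mask, gfp_t *array_gfp, gfp_t *page_gfp)
{
	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;

	gfp_mask |= __GFP_NOWARN;
	if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
		gfp_mask |= __GFP_HIGHMEM;

	*array_gfp = nested_gfp;	/* no highmem bit for the array */
	*page_gfp = gfp_mask;		/* caller's mask + NOWARN (+ HIGHMEM) */
}

int main(void)
{
	gfp_t array_gfp, page_gfp;
	const gfp_t sample = 0x0e0u;	/* stub GFP_KERNEL-like input */

	masks_before(sample, &array_gfp, &page_gfp);
	printf("before: array=%#x page=%#x\n", array_gfp, page_gfp);
	masks_after(sample, &array_gfp, &page_gfp);
	printf("after:  array=%#x page=%#x\n", array_gfp, page_gfp);
	return 0;
}

The observable difference is confined to the page array: it no longer has
the highmem bit ORed in, while the per-page allocations see exactly the
caller's mask plus __GFP_NOWARN and, when no DMA zone is requested,
__GFP_HIGHMEM.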
This commit is contained in:

parent 301fa9f2dd
commit f255935b97

1 changed file with 10 additions and 12 deletions:

 mm/vmalloc.c | 22 ++++++++++------------
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2461,21 +2461,19 @@ EXPORT_SYMBOL_GPL(vmap_pfn);
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 				 pgprot_t prot, int node)
 {
-	struct page **pages;
-	unsigned int nr_pages, array_size, i;
 	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
-	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
-	const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
-					0 :
-					__GFP_HIGHMEM;
+	unsigned int nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
+	unsigned int array_size = nr_pages * sizeof(struct page *), i;
+	struct page **pages;
 
-	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
-	array_size = (nr_pages * sizeof(struct page *));
+	gfp_mask |= __GFP_NOWARN;
+	if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
+		gfp_mask |= __GFP_HIGHMEM;
 
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
-		pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
-				node, area->caller);
+		pages = __vmalloc_node(array_size, 1, nested_gfp, node,
+					area->caller);
 	} else {
 		pages = kmalloc_node(array_size, nested_gfp, node);
 	}
@@ -2493,9 +2491,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		struct page *page;
 
 		if (node == NUMA_NO_NODE)
-			page = alloc_page(alloc_mask|highmem_mask);
+			page = alloc_page(gfp_mask);
 		else
-			page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
+			page = alloc_pages_node(node, gfp_mask, 0);
 
 		if (unlikely(!page)) {
 			/* Successfully allocated i pages, free them in __vfree() */