	mm/sparsemem: Allocate mem_section at runtime for CONFIG_SPARSEMEM_EXTREME=y
Size of the mem_section[] array depends on the size of the physical address space.

In preparation for boot-time switching between paging modes on x86-64 we need to make the allocation of mem_section[] dynamic, because otherwise we waste a lot of RAM: with CONFIG_NODES_SHIFT=10, mem_section[] size is 32kB for 4-level paging and 2MB for 5-level paging mode.

The patch allocates the array on the first call to sparse_memory_present_with_active_regions().

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@suse.de>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20170929140821.37654-2-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 967535223f
commit 83e3c48729

3 changed files with 26 additions and 7 deletions
				
			
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1150,13 +1150,17 @@ struct mem_section {
 #define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)
 
 #ifdef CONFIG_SPARSEMEM_EXTREME
-extern struct mem_section *mem_section[NR_SECTION_ROOTS];
+extern struct mem_section **mem_section;
 #else
 extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
 #endif
 
 static inline struct mem_section *__nr_to_section(unsigned long nr)
 {
+#ifdef CONFIG_SPARSEMEM_EXTREME
+	if (!mem_section)
+		return NULL;
+#endif
 	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
 		return NULL;
 	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
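Since mem_section is now a bare pointer under SPARSEMEM_EXTREME, __nr_to_section() can return NULL before the boot-time allocation has happened, not only for never-populated roots. A minimal sketch of the caller-side contract, assuming the usual mmzone.h context; my_pfn_present() is a hypothetical helper, not part of this patch, while pfn_to_section_nr() and present_section() are existing primitives:

/*
 * Hypothetical caller (not from this patch): anything that can run
 * before sparse_memory_present_with_active_regions() must treat a
 * NULL return from __nr_to_section() as "no such section" instead of
 * assuming the root array always exists.
 */
static inline int my_pfn_present(unsigned long pfn)
{
	struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));

	return ms && present_section(ms);
}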
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5646,6 +5646,16 @@ void __init sparse_memory_present_with_active_regions(int nid)
 	unsigned long start_pfn, end_pfn;
 	int i, this_nid;
 
+#ifdef CONFIG_SPARSEMEM_EXTREME
+	if (!mem_section) {
+		unsigned long size, align;
+
+		size = sizeof(struct mem_section) * NR_SECTION_ROOTS;
+		align = 1 << (INTERNODE_CACHE_SHIFT);
+		mem_section = memblock_virt_alloc(size, align);
+	}
+#endif
+
 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
 		memory_present(this_nid, start_pfn, end_pfn);
 }
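On the allocation itself: memblock_virt_alloc() returns zeroed memory and panics on failure, so there is no error path here, and the zeroed roots are exactly the "not yet populated" state that __nr_to_section() already checks for; the alignment reproduces what the static array used to get from ____cacheline_internodealigned_in_smp. A commented restatement as a sketch: the helper name is invented, and the patch open-codes this in sparse_memory_present_with_active_regions():

#ifdef CONFIG_SPARSEMEM_EXTREME
/* Hypothetical helper restating the hunk above, for commentary only. */
static void __init sparse_alloc_section_roots(void)
{
	unsigned long size, align;

	if (mem_section)	/* idempotent: only the first call allocates */
		return;

	/*
	 * Sized per-slot by sizeof(struct mem_section) even though each
	 * slot holds a pointer, exactly as in the hunk above; that
	 * appears to over-allocate, but harmlessly so.
	 */
	size = sizeof(struct mem_section) * NR_SECTION_ROOTS;

	/* Same alignment ____cacheline_internodealigned_in_smp provided. */
	align = 1 << (INTERNODE_CACHE_SHIFT);

	/* Zeroed on return; panics rather than returning NULL. */
	mem_section = memblock_virt_alloc(size, align);
}
#endif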
							
								
								
									
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -22,8 +22,7 @@
  * 1) mem_section	- memory sections, mem_map's for valid memory
  */
 #ifdef CONFIG_SPARSEMEM_EXTREME
-struct mem_section *mem_section[NR_SECTION_ROOTS]
-	____cacheline_internodealigned_in_smp;
+struct mem_section **mem_section;
 #else
 struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
 	____cacheline_internodealigned_in_smp;
@@ -100,7 +99,7 @@ static inline int sparse_index_init(unsigned long section_nr, int nid)
 int __section_nr(struct mem_section* ms)
 {
 	unsigned long root_nr;
-	struct mem_section* root;
+	struct mem_section *root = NULL;
 
 	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
 		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
@@ -111,7 +110,7 @@ int __section_nr(struct mem_section* ms)
 		     break;
 	}
 
-	VM_BUG_ON(root_nr == NR_SECTION_ROOTS);
+	VM_BUG_ON(!root);
 
 	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
 }
@@ -329,11 +328,17 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
 {
 	unsigned long usemap_snr, pgdat_snr;
-	static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
-	static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
+	static unsigned long old_usemap_snr;
+	static unsigned long old_pgdat_snr;
 	struct pglist_data *pgdat = NODE_DATA(nid);
 	int usemap_nid;
 
+	/* First call */
+	if (!old_usemap_snr) {
+		old_usemap_snr = NR_MEM_SECTIONS;
+		old_pgdat_snr = NR_MEM_SECTIONS;
+	}
+
 	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
 	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
 	if (usemap_snr == pgdat_snr)
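The check_usemap_section_nr() hunk trades compile-time initializers for a first-call check. NR_MEM_SECTIONS is still a compile-time constant at this point, so this reads as groundwork for the rest of the series, where the section count comes to depend on the boot-time paging mode and can no longer appear in a static initializer; that rationale is an inference, not stated in the commit message. The pattern in isolation, as a runnable sketch with illustrative names:

#include <stdio.h>

/* Stand-in for a limit that is only known at run time. */
static unsigned long nr_sections_runtime(void)
{
	return 1UL << 19;
}

static unsigned long max_section_nr(void)
{
	static unsigned long cached;	/* BSS: guaranteed to start at 0 */

	/*
	 * First call: 0 doubles as the "uninitialized" sentinel, which
	 * is sound only because the real value is never 0 -- the same
	 * property check_usemap_section_nr() relies on, since
	 * NR_MEM_SECTIONS is nonzero.
	 */
	if (!cached)
		cached = nr_sections_runtime();

	return cached;
}

int main(void)
{
	printf("%lu\n", max_section_nr());	/* 524288 */
	return 0;
}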