mm: move most of core MM initialization to mm/mm_init.c

The bulk of memory management initialization code is spread all over
mm/page_alloc.c and makes navigating through page allocator functionality
difficult.

Move most of the functions marked __init and __meminit to mm/mm_init.c to
make it better localized and allow some more spare room before
mm/page_alloc.c reaches 10k lines.

No functional changes.

Link: https://lkml.kernel.org/r/20230321170513.2401534-4-rppt@kernel.org
Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Doug Berger <opendmb@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent fce0b4213e
commit 9420f89db2

5 changed files with 2353 additions and 2339 deletions
			
include/linux/gfp.h

@@ -361,9 +361,4 @@ extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
 #endif
 void free_contig_range(unsigned long pfn, unsigned long nr_pages);
 
-#ifdef CONFIG_CMA
-/* CMA stuff */
-extern void init_cma_reserved_pageblock(struct page *page);
-#endif
-
 #endif /* __LINUX_GFP_H */
							
								
								
									
mm/cma.c: 1 changed line
@@ -33,6 +33,7 @@
 #include <linux/kmemleak.h>
 #include <trace/events/cma.h>
 
+#include "internal.h"
 #include "cma.h"
 
 struct cma cma_areas[MAX_CMA_AREAS];
mm/internal.h

@@ -202,6 +202,8 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
  * in mm/page_alloc.c
  */
 
+extern char * const zone_names[MAX_NR_ZONES];
+
 /*
  * Structure for holding the mostly immutable allocation parameters passed
  * between functions involved in allocations, including the alloc_pages*
@@ -366,7 +368,29 @@ extern void __putback_isolated_page(struct page *page, unsigned int order,
 extern void memblock_free_pages(struct page *page, unsigned long pfn,
 					unsigned int order);
 extern void __free_pages_core(struct page *page, unsigned int order);
 
+static inline void prep_compound_head(struct page *page, unsigned int order)
+{
+	struct folio *folio = (struct folio *)page;
+
+	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
+	set_compound_order(page, order);
+	atomic_set(&folio->_entire_mapcount, -1);
+	atomic_set(&folio->_nr_pages_mapped, 0);
+	atomic_set(&folio->_pincount, 0);
+}
+
+static inline void prep_compound_tail(struct page *head, int tail_idx)
+{
+	struct page *p = head + tail_idx;
+
+	p->mapping = TAIL_MAPPING;
+	set_compound_head(p, head);
+	set_page_private(p, 0);
+}
+
+extern void prep_compound_page(struct page *page, unsigned int order);
+
 extern void post_alloc_hook(struct page *page, unsigned int order,
 					gfp_t gfp_flags);
 extern int user_min_free_kbytes;
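The two inline helpers added in the hunk above are the building blocks for compound-page setup. prep_compound_page(), declared here and defined in mm/page_alloc.c, composes them roughly as sketched below. This sketch is for orientation only; it mirrors the mainline implementation around this series and is not part of the patch:

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	__SetPageHead(page);

	/* Link every tail page back to the head page. */
	for (i = 1; i < nr_pages; i++)
		prep_compound_tail(page, i);

	/* Set up the head/folio metadata last. */
	prep_compound_head(page, order);
}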
@@ -377,6 +401,7 @@ extern void free_unref_page_list(struct list_head *list);
 extern void zone_pcp_reset(struct zone *zone);
 extern void zone_pcp_disable(struct zone *zone);
 extern void zone_pcp_enable(struct zone *zone);
+extern void zone_pcp_init(struct zone *zone);
 
 extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
 			  phys_addr_t min_addr,
@@ -474,7 +499,12 @@ isolate_migratepages_range(struct compact_control *cc,
 
 int __alloc_contig_migrate_range(struct compact_control *cc,
 					unsigned long start, unsigned long end);
-#endif
+
+/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
+void init_cma_reserved_pageblock(struct page *page);
+
+#endif /* CONFIG_COMPACTION || CONFIG_CMA */
+
 int find_suitable_fallback(struct free_area *area, unsigned int order,
 			int migratetype, bool only_stealable, bool *can_steal);
 
@@ -658,6 +688,12 @@ static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
 #endif /* !CONFIG_MMU */
 
 /* Memory initialisation debug and verification */
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+DECLARE_STATIC_KEY_TRUE(deferred_pages);
+
+bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
+#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
+
 enum mminit_level {
 	MMINIT_WARNING,
 	MMINIT_VERIFY,
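The deferred_pages static key and deferred_grow_zone() exposed in the hunk above let the page allocator cope with zones whose struct pages are still being initialized in the background. A rough usage sketch of that pattern, assuming a caller in the allocator slow path; the wrapper name below is hypothetical and only illustrates how the two declarations fit together:

/* Hypothetical wrapper: only while deferred struct-page init is still in
 * progress do we try to grow the zone on demand to satisfy an allocation
 * of the given order.
 */
static bool try_grow_deferred_zone(struct zone *zone, unsigned int order)
{
	if (!static_branch_unlikely(&deferred_pages))
		return false;

	return deferred_grow_zone(zone, order);
}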
							
								
								
									
mm/mm_init.c: 2304 changed lines
(file diff suppressed because it is too large)
							
								
								
									
mm/page_alloc.c: 2344 changed lines
(file diff suppressed because it is too large)