mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	mm: remove pfn_valid_within() and CONFIG_HOLES_IN_ZONE
Patch series "mm: remove pfn_valid_within() and CONFIG_HOLES_IN_ZONE". After recent updates to freeing unused parts of the memory map, no architecture can have holes in the memory map within a pageblock. This makes pfn_valid_within() check and CONFIG_HOLES_IN_ZONE configuration option redundant. The first patch removes them both in a mechanical way and the second patch simplifies memory_hotplug::test_pages_in_a_zone() that had pfn_valid_within() surrounded by more logic than a simple if. This patch (of 2): After recent changes in freeing of the unused parts of the memory map and rework of pfn_valid() in arm and arm64 there are no architectures that can have holes in the memory map within a pageblock and so nothing can enable CONFIG_HOLES_IN_ZONE which guards a non-trivial implementation of pfn_valid_within(). With that, pfn_valid_within() is always hardwired to 1 and can be completely removed. Remove calls to pfn_valid_within() and CONFIG_HOLES_IN_ZONE. Link: https://lkml.kernel.org/r/20210713080035.7464-1-rppt@kernel.org Link: https://lkml.kernel.org/r/20210713080035.7464-2-rppt@kernel.org Signed-off-by: Mike Rapoport <rppt@linux.ibm.com> Acked-by: David Hildenbrand <david@redhat.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: "Rafael J. Wysocki" <rafael@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									ac3332c447
								
							
						
					
					
						commit
						859a85ddf9
					
				
					 8 changed files with 11 additions and 75 deletions
				
			
		| 
						 | 
				
			
			@ -768,8 +768,6 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
 | 
			
		|||
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
 | 
			
		||||
static int __ref get_nid_for_pfn(unsigned long pfn)
 | 
			
		||||
{
 | 
			
		||||
	if (!pfn_valid_within(pfn))
 | 
			
		||||
		return -1;
 | 
			
		||||
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 | 
			
		||||
	if (system_state < SYSTEM_RUNNING)
 | 
			
		||||
		return early_pfn_to_nid(pfn);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1525,18 +1525,6 @@ void sparse_init(void);
 | 
			
		|||
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
 | 
			
		||||
#endif /* CONFIG_SPARSEMEM */
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 | 
			
		||||
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 | 
			
		||||
 * pfn_valid_within() should be used in this case; we optimise this away
 | 
			
		||||
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 | 
			
		||||
 */
 | 
			
		||||
#ifdef CONFIG_HOLES_IN_ZONE
 | 
			
		||||
#define pfn_valid_within(pfn) pfn_valid(pfn)
 | 
			
		||||
#else
 | 
			
		||||
#define pfn_valid_within(pfn) (1)
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
#endif /* !__GENERATING_BOUNDS.H */
 | 
			
		||||
#endif /* !__ASSEMBLY__ */
 | 
			
		||||
#endif /* _LINUX_MMZONE_H */
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -96,9 +96,6 @@ config HAVE_FAST_GUP
 | 
			
		|||
	depends on MMU
 | 
			
		||||
	bool
 | 
			
		||||
 | 
			
		||||
config HOLES_IN_ZONE
 | 
			
		||||
	bool
 | 
			
		||||
 | 
			
		||||
# Don't discard allocated memory used to track "memory" and "reserved" memblocks
 | 
			
		||||
# after early boot, so it can still be used to test for validity of memory.
 | 
			
		||||
# Also, memblocks are updated with memory hot(un)plug.
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -306,16 +306,14 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
 | 
			
		|||
	 * is necessary for the block to be a migration source/target.
 | 
			
		||||
	 */
 | 
			
		||||
	do {
 | 
			
		||||
		if (pfn_valid_within(pfn)) {
 | 
			
		||||
			if (check_source && PageLRU(page)) {
 | 
			
		||||
				clear_pageblock_skip(page);
 | 
			
		||||
				return true;
 | 
			
		||||
			}
 | 
			
		||||
		if (check_source && PageLRU(page)) {
 | 
			
		||||
			clear_pageblock_skip(page);
 | 
			
		||||
			return true;
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
			if (check_target && PageBuddy(page)) {
 | 
			
		||||
				clear_pageblock_skip(page);
 | 
			
		||||
				return true;
 | 
			
		||||
			}
 | 
			
		||||
		if (check_target && PageBuddy(page)) {
 | 
			
		||||
			clear_pageblock_skip(page);
 | 
			
		||||
			return true;
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		page += (1 << PAGE_ALLOC_COSTLY_ORDER);
 | 
			
		||||
| 
						 | 
				
			
			@ -585,8 +583,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 | 
			
		|||
			break;
 | 
			
		||||
 | 
			
		||||
		nr_scanned++;
 | 
			
		||||
		if (!pfn_valid_within(blockpfn))
 | 
			
		||||
			goto isolate_fail;
 | 
			
		||||
 | 
			
		||||
		/*
 | 
			
		||||
		 * For compound pages such as THP and hugetlbfs, we can save
 | 
			
		||||
| 
						 | 
				
			
			@ -885,8 +881,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 | 
			
		|||
			cond_resched();
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if (!pfn_valid_within(low_pfn))
 | 
			
		||||
			goto isolate_fail;
 | 
			
		||||
		nr_scanned++;
 | 
			
		||||
 | 
			
		||||
		page = pfn_to_page(low_pfn);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1308,10 +1308,6 @@ struct zone *test_pages_in_a_zone(unsigned long start_pfn,
 | 
			
		|||
		for (; pfn < sec_end_pfn && pfn < end_pfn;
 | 
			
		||||
		     pfn += MAX_ORDER_NR_PAGES) {
 | 
			
		||||
			i = 0;
 | 
			
		||||
			/* This is just a CONFIG_HOLES_IN_ZONE check.*/
 | 
			
		||||
			while ((i < MAX_ORDER_NR_PAGES) &&
 | 
			
		||||
				!pfn_valid_within(pfn + i))
 | 
			
		||||
				i++;
 | 
			
		||||
			if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
 | 
			
		||||
				continue;
 | 
			
		||||
			/* Check if we got outside of the zone */
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -594,8 +594,6 @@ static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 | 
			
		|||
 | 
			
		||||
static int page_is_consistent(struct zone *zone, struct page *page)
 | 
			
		||||
{
 | 
			
		||||
	if (!pfn_valid_within(page_to_pfn(page)))
 | 
			
		||||
		return 0;
 | 
			
		||||
	if (zone != page_zone(page))
 | 
			
		||||
		return 0;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -1025,16 +1023,12 @@ buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
 | 
			
		|||
	if (order >= MAX_ORDER - 2)
 | 
			
		||||
		return false;
 | 
			
		||||
 | 
			
		||||
	if (!pfn_valid_within(buddy_pfn))
 | 
			
		||||
		return false;
 | 
			
		||||
 | 
			
		||||
	combined_pfn = buddy_pfn & pfn;
 | 
			
		||||
	higher_page = page + (combined_pfn - pfn);
 | 
			
		||||
	buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
 | 
			
		||||
	higher_buddy = higher_page + (buddy_pfn - combined_pfn);
 | 
			
		||||
 | 
			
		||||
	return pfn_valid_within(buddy_pfn) &&
 | 
			
		||||
	       page_is_buddy(higher_page, higher_buddy, order + 1);
 | 
			
		||||
	return page_is_buddy(higher_page, higher_buddy, order + 1);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
| 
						 | 
				
			
			@ -1095,8 +1089,6 @@ static inline void __free_one_page(struct page *page,
 | 
			
		|||
		buddy_pfn = __find_buddy_pfn(pfn, order);
 | 
			
		||||
		buddy = page + (buddy_pfn - pfn);
 | 
			
		||||
 | 
			
		||||
		if (!pfn_valid_within(buddy_pfn))
 | 
			
		||||
			goto done_merging;
 | 
			
		||||
		if (!page_is_buddy(page, buddy, order))
 | 
			
		||||
			goto done_merging;
 | 
			
		||||
		/*
 | 
			
		||||
| 
						 | 
				
			
			@ -1754,9 +1746,7 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
 | 
			
		|||
/*
 | 
			
		||||
 * Check that the whole (or subset of) a pageblock given by the interval of
 | 
			
		||||
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 | 
			
		||||
 * with the migration of free compaction scanner. The scanners then need to
 | 
			
		||||
 * use only pfn_valid_within() check for arches that allow holes within
 | 
			
		||||
 * pageblocks.
 | 
			
		||||
 * with the migration of free compaction scanner.
 | 
			
		||||
 *
 | 
			
		||||
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 | 
			
		||||
 *
 | 
			
		||||
| 
						 | 
				
			
			@ -1872,8 +1862,6 @@ static inline void __init pgdat_init_report_one_done(void)
 | 
			
		|||
 */
 | 
			
		||||
static inline bool __init deferred_pfn_valid(unsigned long pfn)
 | 
			
		||||
{
 | 
			
		||||
	if (!pfn_valid_within(pfn))
 | 
			
		||||
		return false;
 | 
			
		||||
	if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
 | 
			
		||||
		return false;
 | 
			
		||||
	return true;
 | 
			
		||||
| 
						 | 
				
			
			@ -2520,11 +2508,6 @@ static int move_freepages(struct zone *zone,
 | 
			
		|||
	int pages_moved = 0;
 | 
			
		||||
 | 
			
		||||
	for (pfn = start_pfn; pfn <= end_pfn;) {
 | 
			
		||||
		if (!pfn_valid_within(pfn)) {
 | 
			
		||||
			pfn++;
 | 
			
		||||
			continue;
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		page = pfn_to_page(pfn);
 | 
			
		||||
		if (!PageBuddy(page)) {
 | 
			
		||||
			/*
 | 
			
		||||
| 
						 | 
				
			
			@ -8814,9 +8797,6 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
 | 
			
		|||
	}
 | 
			
		||||
 | 
			
		||||
	for (; iter < pageblock_nr_pages - offset; iter++) {
 | 
			
		||||
		if (!pfn_valid_within(pfn + iter))
 | 
			
		||||
			continue;
 | 
			
		||||
 | 
			
		||||
		page = pfn_to_page(pfn + iter);
 | 
			
		||||
 | 
			
		||||
		/*
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -93,8 +93,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 | 
			
		|||
			buddy_pfn = __find_buddy_pfn(pfn, order);
 | 
			
		||||
			buddy = page + (buddy_pfn - pfn);
 | 
			
		||||
 | 
			
		||||
			if (pfn_valid_within(buddy_pfn) &&
 | 
			
		||||
			    !is_migrate_isolate_page(buddy)) {
 | 
			
		||||
			if (!is_migrate_isolate_page(buddy)) {
 | 
			
		||||
				__isolate_free_page(page, order);
 | 
			
		||||
				isolated_page = true;
 | 
			
		||||
			}
 | 
			
		||||
| 
						 | 
				
			
			@ -250,10 +249,6 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
 | 
			
		|||
	struct page *page;
 | 
			
		||||
 | 
			
		||||
	while (pfn < end_pfn) {
 | 
			
		||||
		if (!pfn_valid_within(pfn)) {
 | 
			
		||||
			pfn++;
 | 
			
		||||
			continue;
 | 
			
		||||
		}
 | 
			
		||||
		page = pfn_to_page(pfn);
 | 
			
		||||
		if (PageBuddy(page))
 | 
			
		||||
			/*
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -276,9 +276,6 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 | 
			
		|||
		pageblock_mt = get_pageblock_migratetype(page);
 | 
			
		||||
 | 
			
		||||
		for (; pfn < block_end_pfn; pfn++) {
 | 
			
		||||
			if (!pfn_valid_within(pfn))
 | 
			
		||||
				continue;
 | 
			
		||||
 | 
			
		||||
			/* The pageblock is online, no need to recheck. */
 | 
			
		||||
			page = pfn_to_page(pfn);
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -479,10 +476,6 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 | 
			
		|||
			continue;
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		/* Check for holes within a MAX_ORDER area */
 | 
			
		||||
		if (!pfn_valid_within(pfn))
 | 
			
		||||
			continue;
 | 
			
		||||
 | 
			
		||||
		page = pfn_to_page(pfn);
 | 
			
		||||
		if (PageBuddy(page)) {
 | 
			
		||||
			unsigned long freepage_order = buddy_order_unsafe(page);
 | 
			
		||||
| 
						 | 
				
			
			@ -560,14 +553,9 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
 | 
			
		|||
		block_end_pfn = min(block_end_pfn, end_pfn);
 | 
			
		||||
 | 
			
		||||
		for (; pfn < block_end_pfn; pfn++) {
 | 
			
		||||
			struct page *page;
 | 
			
		||||
			struct page *page = pfn_to_page(pfn);
 | 
			
		||||
			struct page_ext *page_ext;
 | 
			
		||||
 | 
			
		||||
			if (!pfn_valid_within(pfn))
 | 
			
		||||
				continue;
 | 
			
		||||
 | 
			
		||||
			page = pfn_to_page(pfn);
 | 
			
		||||
 | 
			
		||||
			if (page_zone(page) != zone)
 | 
			
		||||
				continue;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue