Mirror of https://github.com/torvalds/linux.git — synced 2025-11-04 10:40:15 +02:00
		
		
		
	mm/migrate: add CPU hotplug to demotion #ifdef
Once upon a time, the node demotion updates were driven solely by memory
hotplug events.  But now, there are handlers for both CPU and memory
hotplug.
However, the #ifdef around the code checks only memory hotplug.  A
system that has HOTPLUG_CPU=y but MEMORY_HOTPLUG=n would miss CPU
hotplug events.
Update the #ifdef around the common code.  Add memory and CPU-specific
#ifdefs for their handlers.  These memory/CPU #ifdefs avoid unused
function warnings when their Kconfig option is off.
[arnd@arndb.de: rework hotplug_memory_notifier() stub]
  Link: https://lkml.kernel.org/r/20211013144029.2154629-1-arnd@kernel.org
Link: https://lkml.kernel.org/r/20210924161255.E5FE8F7E@davehans-spike.ostc.intel.com
Fixes: 884a6e5d1f ("mm/migrate: update node demotion order on hotplug events")
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Wei Xu <weixugc@google.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
			
			
This commit is contained in:
	parent: 295be91f7e
	commit: 76af6a054d
	4 changed files with 28 additions and 27 deletions
			
		| 
						 | 
				
			
			@ -160,7 +160,10 @@ int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
 | 
			
		|||
#define register_hotmemory_notifier(nb)		register_memory_notifier(nb)
 | 
			
		||||
#define unregister_hotmemory_notifier(nb) 	unregister_memory_notifier(nb)
 | 
			
		||||
#else
 | 
			
		||||
#define hotplug_memory_notifier(fn, pri)	({ 0; })
 | 
			
		||||
static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
 | 
			
		||||
{
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
/* These aren't inline functions due to a GCC bug. */
 | 
			
		||||
#define register_hotmemory_notifier(nb)    ({ (void)(nb); 0; })
 | 
			
		||||
#define unregister_hotmemory_notifier(nb)  ({ (void)(nb); })
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
							
								
								
									
										42
									
								
								mm/migrate.c
									
									
									
									
									
								
							
							
						
						
									
										42
									
								
								mm/migrate.c
									
									
									
									
									
								
							| 
						 | 
				
			
			@ -3066,7 +3066,7 @@ void migrate_vma_finalize(struct migrate_vma *migrate)
 | 
			
		|||
EXPORT_SYMBOL(migrate_vma_finalize);
 | 
			
		||||
#endif /* CONFIG_DEVICE_PRIVATE */
 | 
			
		||||
 | 
			
		||||
#if defined(CONFIG_MEMORY_HOTPLUG)
 | 
			
		||||
#if defined(CONFIG_HOTPLUG_CPU)
 | 
			
		||||
/* Disable reclaim-based migration. */
 | 
			
		||||
static void __disable_all_migrate_targets(void)
 | 
			
		||||
{
 | 
			
		||||
| 
						 | 
				
			
			@ -3208,25 +3208,6 @@ static void set_migration_target_nodes(void)
 | 
			
		|||
	put_online_mems();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
 * React to hotplug events that might affect the migration targets
 | 
			
		||||
 * like events that online or offline NUMA nodes.
 | 
			
		||||
 *
 | 
			
		||||
 * The ordering is also currently dependent on which nodes have
 | 
			
		||||
 * CPUs.  That means we need CPU on/offline notification too.
 | 
			
		||||
 */
 | 
			
		||||
static int migration_online_cpu(unsigned int cpu)
 | 
			
		||||
{
 | 
			
		||||
	set_migration_target_nodes();
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static int migration_offline_cpu(unsigned int cpu)
 | 
			
		||||
{
 | 
			
		||||
	set_migration_target_nodes();
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
 * This leaves migrate-on-reclaim transiently disabled between
 | 
			
		||||
 * the MEM_GOING_OFFLINE and MEM_OFFLINE events.  This runs
 | 
			
		||||
| 
						 | 
				
			
			@ -3284,6 +3265,25 @@ static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
 | 
			
		|||
	return notifier_from_errno(0);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
 * React to hotplug events that might affect the migration targets
 | 
			
		||||
 * like events that online or offline NUMA nodes.
 | 
			
		||||
 *
 | 
			
		||||
 * The ordering is also currently dependent on which nodes have
 | 
			
		||||
 * CPUs.  That means we need CPU on/offline notification too.
 | 
			
		||||
 */
 | 
			
		||||
static int migration_online_cpu(unsigned int cpu)
 | 
			
		||||
{
 | 
			
		||||
	set_migration_target_nodes();
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static int migration_offline_cpu(unsigned int cpu)
 | 
			
		||||
{
 | 
			
		||||
	set_migration_target_nodes();
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static int __init migrate_on_reclaim_init(void)
 | 
			
		||||
{
 | 
			
		||||
	int ret;
 | 
			
		||||
| 
						 | 
				
			
			@ -3303,4 +3303,4 @@ static int __init migrate_on_reclaim_init(void)
 | 
			
		|||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
late_initcall(migrate_on_reclaim_init);
 | 
			
		||||
#endif /* CONFIG_MEMORY_HOTPLUG */
 | 
			
		||||
#endif /* CONFIG_HOTPLUG_CPU */
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -269,7 +269,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
 | 
			
		|||
	total_usage += table_size;
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
#ifdef CONFIG_MEMORY_HOTPLUG
 | 
			
		||||
 | 
			
		||||
static void free_page_ext(void *addr)
 | 
			
		||||
{
 | 
			
		||||
	if (is_vmalloc_addr(addr)) {
 | 
			
		||||
| 
						 | 
				
			
			@ -374,8 +374,6 @@ static int __meminit page_ext_callback(struct notifier_block *self,
 | 
			
		|||
	return notifier_from_errno(ret);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
void __init page_ext_init(void)
 | 
			
		||||
{
 | 
			
		||||
	unsigned long pfn;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1095,7 +1095,7 @@ static int slab_offline_cpu(unsigned int cpu)
 | 
			
		|||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
 | 
			
		||||
#if defined(CONFIG_NUMA)
 | 
			
		||||
/*
 | 
			
		||||
 * Drains freelist for a node on each slab cache, used for memory hot-remove.
 | 
			
		||||
 * Returns -EBUSY if all objects cannot be drained so that the node is not
 | 
			
		||||
| 
						 | 
				
			
			@ -1157,7 +1157,7 @@ static int __meminit slab_memory_callback(struct notifier_block *self,
 | 
			
		|||
out:
 | 
			
		||||
	return notifier_from_errno(ret);
 | 
			
		||||
}
 | 
			
		||||
#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
 | 
			
		||||
#endif /* CONFIG_NUMA */
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
 * swap the static kmem_cache_node with kmalloced memory
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue