mm: memcg: optimize parent iteration in memcg_rstat_updated()

In memcg_rstat_updated(), we iterate the memcg being updated and its
parents to update memcg->vmstats_percpu->stats_updates in the fast path
(i.e. no atomic updates). According to my math, this is 3 memory loads
(and potentially 3 cache misses) per memcg:
- Load the address of memcg->vmstats_percpu.
- Load vmstats_percpu->stats_updates (based on some percpu calculation).
- Load the address of the parent memcg.
Avoid most of the cache misses by caching a pointer from each struct
memcg_vmstats_percpu to its parent on the corresponding CPU. In this
case, for the first memcg we still have 2 memory loads (the first two above):
- Load the address of memcg->vmstats_percpu.
- Load vmstats_percpu->stats_updates (based on some percpu calculation).
Then for each additional memcg, we need a single load to get the
parent's stats_updates directly. This reduces the number of loads from
O(3N) to O(2+N) -- where N is the number of memcgs we need to iterate.
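In simplified form (excerpted from the diff below, with the batching logic
elided), the fast path goes from walking struct mem_cgroup parents:

	for (; memcg; memcg = parent_mem_cgroup(memcg))
		x = __this_cpu_add_return(memcg->vmstats_percpu->stats_updates,
					  abs(val));

to walking the cached per-CPU pointers directly:

	statc = this_cpu_ptr(memcg->vmstats_percpu);
	for (; statc; statc = statc->parent)
		statc->stats_updates += abs(val);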
Additionally, stash a pointer to memcg->vmstats in each struct
memcg_vmstats_percpu such that we can access the atomic counter that all
CPUs fold into, memcg->vmstats->stats_updates.
memcg_should_flush_stats() is changed to memcg_vmstats_needs_flush() to
accept a struct memcg_vmstats pointer accordingly.
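As it reads in the diff below:

	static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
	{
		return atomic64_read(&vmstats->stats_updates) >
			MEMCG_CHARGE_BATCH * num_online_cpus();
	}

so the fast path can now call memcg_vmstats_needs_flush(statc->vmstats)
without dereferencing struct mem_cgroup at all.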
In struct memcg_vmstats_percpu, make sure both pointers together with
stats_updates live on the same cacheline. Finally, update
mem_cgroup_alloc() to take in a parent pointer and initialize the new
cache pointers on each CPU. The percpu loop in mem_cgroup_alloc() may
look concerning, but there are multiple similar loops in the cgroup
creation path (e.g. cgroup_rstat_init()), most of which are hidden
within alloc_percpu().
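For reference, the per-CPU initialization loop as it appears in the diff
below:

	for_each_possible_cpu(cpu) {
		if (parent)
			pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
		statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
		statc->parent = parent ? pstatc : NULL;
		statc->vmstats = memcg->vmstats;
	}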
According to Oliver's testing [1], this fixes multiple 30-38%
regressions in vm-scalability, will-it-scale-tlb_flush2, and
will-it-scale-fallocate1. This comes at a cost of 2 more pointers per
CPU (<2KB on a machine with 128 CPUs).
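(The arithmetic: 2 pointers * 8 bytes * 128 CPUs = 2048 bytes of additional
per-CPU data per memcg.)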
[1] https://lore.kernel.org/lkml/ZbDJsfsZt2ITyo61@xsang-OptiPlex-9020/
[yosryahmed@google.com: fix struct memcg_vmstats_percpu size and alignment]
  Link: https://lkml.kernel.org/r/20240203044612.1234216-1-yosryahmed@google.com
Link: https://lkml.kernel.org/r/20240124100023.660032-1-yosryahmed@google.com
Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Fixes: 8d59d2214c ("mm: memcg: make stats flushing threshold per-memcg")
Tested-by: kernel test robot <oliver.sang@intel.com>
Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202401221624.cb53a8ca-oliver.sang@intel.com
Acked-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Greg Thelen <gthelen@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 9cee7e8ef3
parent 67b8bcbaed

 1 file changed, 35 insertions(+), 21 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -621,6 +621,15 @@ static inline int memcg_events_index(enum vm_event_item idx)
 }
 
 struct memcg_vmstats_percpu {
+	/* Stats updates since the last flush */
+	unsigned int			stats_updates;
+
+	/* Cached pointers for fast iteration in memcg_rstat_updated() */
+	struct memcg_vmstats_percpu	*parent;
+	struct memcg_vmstats		*vmstats;
+
+	/* The above should fit a single cacheline for memcg_rstat_updated() */
+
 	/* Local (CPU and cgroup) page state & events */
 	long			state[MEMCG_NR_STAT];
 	unsigned long		events[NR_MEMCG_EVENTS];
@@ -632,10 +641,7 @@ struct memcg_vmstats_percpu {
 	/* Cgroup1: threshold notifications & softlimit tree updates */
 	unsigned long		nr_page_events;
 	unsigned long		targets[MEM_CGROUP_NTARGETS];
-
-	/* Stats updates since the last flush */
-	unsigned int		stats_updates;
-};
+} ____cacheline_aligned;
 
 struct memcg_vmstats {
 	/* Aggregated (CPU and subtree) page state & events */
@@ -698,36 +704,35 @@ static void memcg_stats_unlock(void)
 }
 
 
-static bool memcg_should_flush_stats(struct mem_cgroup *memcg)
+static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
 {
-	return atomic64_read(&memcg->vmstats->stats_updates) >
+	return atomic64_read(&vmstats->stats_updates) >
 		MEMCG_CHARGE_BATCH * num_online_cpus();
 }
 
 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
 {
+	struct memcg_vmstats_percpu *statc;
 	int cpu = smp_processor_id();
-	unsigned int x;
 
 	if (!val)
 		return;
 
 	cgroup_rstat_updated(memcg->css.cgroup, cpu);
-
-	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
-		x = __this_cpu_add_return(memcg->vmstats_percpu->stats_updates,
-					  abs(val));
-
-		if (x < MEMCG_CHARGE_BATCH)
+	statc = this_cpu_ptr(memcg->vmstats_percpu);
+	for (; statc; statc = statc->parent) {
+		statc->stats_updates += abs(val);
+		if (statc->stats_updates < MEMCG_CHARGE_BATCH)
 			continue;
 
 		/*
 		 * If @memcg is already flush-able, increasing stats_updates is
 		 * redundant. Avoid the overhead of the atomic update.
 		 */
-		if (!memcg_should_flush_stats(memcg))
-			atomic64_add(x, &memcg->vmstats->stats_updates);
-		__this_cpu_write(memcg->vmstats_percpu->stats_updates, 0);
+		if (!memcg_vmstats_needs_flush(statc->vmstats))
+			atomic64_add(statc->stats_updates,
+				     &statc->vmstats->stats_updates);
+		statc->stats_updates = 0;
 	}
 }
 
@@ -756,7 +761,7 @@ void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
 	if (!memcg)
 		memcg = root_mem_cgroup;
 
-	if (memcg_should_flush_stats(memcg))
+	if (memcg_vmstats_needs_flush(memcg->vmstats))
 		do_flush_stats(memcg);
 }
 
@@ -770,7 +775,7 @@ void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
 static void flush_memcg_stats_dwork(struct work_struct *w)
 {
 	/*
-	 * Deliberately ignore memcg_should_flush_stats() here so that flushing
+	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
 	 * in latency-sensitive paths is as cheap as possible.
 	 */
 	do_flush_stats(root_mem_cgroup);
@@ -5477,10 +5482,11 @@ static void mem_cgroup_free(struct mem_cgroup *memcg)
 	__mem_cgroup_free(memcg);
 }
 
-static struct mem_cgroup *mem_cgroup_alloc(void)
+static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
 {
+	struct memcg_vmstats_percpu *statc, *pstatc;
 	struct mem_cgroup *memcg;
-	int node;
+	int node, cpu;
 	int __maybe_unused i;
 	long error = -ENOMEM;
 
@@ -5504,6 +5510,14 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
 	if (!memcg->vmstats_percpu)
 		goto fail;
 
+	for_each_possible_cpu(cpu) {
+		if (parent)
+			pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
+		statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
+		statc->parent = parent ? pstatc : NULL;
+		statc->vmstats = memcg->vmstats;
+	}
+
 	for_each_node(node)
 		if (alloc_mem_cgroup_per_node_info(memcg, node))
 			goto fail;
@@ -5549,7 +5563,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 	struct mem_cgroup *memcg, *old_memcg;
 
 	old_memcg = set_active_memcg(parent);
-	memcg = mem_cgroup_alloc();
+	memcg = mem_cgroup_alloc(parent);
 	set_active_memcg(old_memcg);
 	if (IS_ERR(memcg))
 		return ERR_CAST(memcg);