mirror of https://github.com/torvalds/linux.git
	mm: memcg/slab: use a single set of kmem_caches for all accounted allocations
This is a fairly big but mostly red patch, which makes all accounted slab
allocations use a single set of kmem_caches instead of creating a separate
set for each memory cgroup.

Because the number of non-root kmem_caches is now capped by the number of
root kmem_caches, there is no need to shrink or destroy them prematurely.
They can be perfectly destroyed together with their root counterparts.
This allows us to dramatically simplify the management of non-root
kmem_caches and delete a ton of code.

This patch performs the following changes:
1) introduces memcg_params.memcg_cache pointer to represent the kmem_cache
   which will be used for all non-root allocations
2) reuses the existing memcg kmem_cache creation mechanism to create memcg
   kmem_cache on the first allocation attempt
3) memcg kmem_caches are named <kmemcache_name>-memcg, e.g. dentry-memcg
4) simplifies memcg_kmem_get_cache() to just return memcg kmem_cache or
   schedule its creation and return the root cache
5) removes almost all non-root kmem_cache management code (separate
   refcounter, reparenting, shrinking, etc)
6) makes slab debugfs display the root_mem_cgroup css id and never show
   :dead and :deact flags in the memcg_slabinfo attribute.

Following patches in the series will simplify the kmem_cache creation.

Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20200623174037.3951353-13-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
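In short, once the shared cache exists, the per-allocation path reduces to the following logic (a simplified sketch of the new memcg_kmem_get_cache() introduced further down in this patch; the comments are added here for illustration, and the memcg_kmem_bypass() check and obj_cgroup charging stay in memcg_slab_pre_alloc_hook()):

	struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
	{
		struct kmem_cache *memcg_cachep;

		/* One shared "<name>-memcg" cache per root cache, created lazily. */
		memcg_cachep = READ_ONCE(cachep->memcg_params.memcg_cache);
		if (unlikely(!memcg_cachep)) {
			/*
			 * First accounted allocation: queue the asynchronous
			 * creation and fall back to the root cache for now.
			 */
			memcg_schedule_kmem_cache_create(cachep);
			return cachep;
		}

		return memcg_cachep;
	}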
parent 0f876e4dc5
commit 9855609bde

7 changed files with 133 additions and 697 deletions
include/linux/memcontrol.h

@@ -317,7 +317,6 @@ struct mem_cgroup {
 	/* Index in the kmem_cache->memcg_params.memcg_caches array */
 	int kmemcg_id;
 	enum memcg_kmem_state kmem_state;
-	struct list_head kmem_caches;
 	struct obj_cgroup __rcu *objcg;
 	struct list_head objcg_list; /* list of inherited objcgs */
 #endif
@@ -1404,9 +1403,7 @@ static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
 }
 #endif
 
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep,
-					struct obj_cgroup **objcgp);
-void memcg_kmem_put_cache(struct kmem_cache *cachep);
+struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
 
 #ifdef CONFIG_MEMCG_KMEM
 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,

include/linux/slab.h

@@ -155,8 +155,7 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name,
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
 
-void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
-void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *);
+void memcg_create_kmem_cache(struct kmem_cache *cachep);
 
 /*
  * Please use this macro to create slab caches. Simply specify the
@@ -580,8 +579,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 	return __kmalloc_node(size, flags, node);
 }
 
-int memcg_update_all_caches(int num_memcgs);
-
 /**
  * kmalloc_array - allocate memory for an array.
  * @n: number of elements.
mm/memcontrol.c (143 lines changed)

@@ -350,7 +350,7 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
 }
 
 /*
- * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
+ * This will be used as a shrinker list's index.
  * The main reason for not using cgroup id for this:
  *  this works better in sparse environments, where we have a lot of memcgs,
  *  but only a few kmem-limited. Or also, if we have, for instance, 200
@@ -569,9 +569,6 @@ ino_t page_cgroup_ino(struct page *page)
 	unsigned long ino = 0;
 
 	rcu_read_lock();
-	if (PageSlab(page) && !PageTail(page)) {
-		memcg = memcg_from_slab_page(page);
-	} else {
-		memcg = page->mem_cgroup;
+	memcg = page->mem_cgroup;
 
 	/*
@@ -582,7 +579,6 @@ ino_t page_cgroup_ino(struct page *page)
 	 */
 	if ((unsigned long) memcg & 0x1UL)
 		memcg = NULL;
-	}
 
 	while (memcg && !(memcg->css.flags & CSS_ONLINE))
 		memcg = parent_mem_cgroup(memcg);
@@ -2822,12 +2818,18 @@ struct mem_cgroup *mem_cgroup_from_obj(void *p)
 	page = virt_to_head_page(p);
 
 	/*
-	 * Slab pages don't have page->mem_cgroup set because corresponding
-	 * kmem caches can be reparented during the lifetime. That's why
-	 * memcg_from_slab_page() should be used instead.
+	 * Slab objects are accounted individually, not per-page.
+	 * Memcg membership data for each individual object is saved in
+	 * the page->obj_cgroups.
 	 */
-	if (PageSlab(page))
-		return memcg_from_slab_page(page);
+	if (page_has_obj_cgroups(page)) {
+		struct obj_cgroup *objcg;
+		unsigned int off;
+
+		off = obj_to_index(page->slab_cache, page, p);
+		objcg = page_obj_cgroups(page)[off];
+		return obj_cgroup_memcg(objcg);
+	}
 
 	/* All other pages use page->mem_cgroup */
 	return page->mem_cgroup;
@@ -2882,8 +2884,6 @@ static int memcg_alloc_cache_id(void)
 	else if (size > MEMCG_CACHES_MAX_SIZE)
 		size = MEMCG_CACHES_MAX_SIZE;
 
-	err = memcg_update_all_caches(size);
-	if (!err)
-		err = memcg_update_all_list_lrus(size);
+	err = memcg_update_all_list_lrus(size);
 	if (!err)
 		memcg_nr_cache_ids = size;
@@ -2903,7 +2903,6 @@ static void memcg_free_cache_id(int id)
 }
 
 struct memcg_kmem_cache_create_work {
-	struct mem_cgroup *memcg;
 	struct kmem_cache *cachep;
 	struct work_struct work;
 };
@@ -2912,33 +2911,24 @@ static void memcg_kmem_cache_create_func(struct work_struct *w)
 {
 	struct memcg_kmem_cache_create_work *cw =
 		container_of(w, struct memcg_kmem_cache_create_work, work);
-	struct mem_cgroup *memcg = cw->memcg;
 	struct kmem_cache *cachep = cw->cachep;
 
-	memcg_create_kmem_cache(memcg, cachep);
+	memcg_create_kmem_cache(cachep);
 
-	css_put(&memcg->css);
 	kfree(cw);
 }
 
 /*
  * Enqueue the creation of a per-memcg kmem_cache.
  */
-static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
-					     struct kmem_cache *cachep)
+static void memcg_schedule_kmem_cache_create(struct kmem_cache *cachep)
 {
 	struct memcg_kmem_cache_create_work *cw;
 
-	if (!css_tryget_online(&memcg->css))
-		return;
-
 	cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
-	if (!cw) {
-		css_put(&memcg->css);
+	if (!cw)
 		return;
-	}
 
-	cw->memcg = memcg;
 	cw->cachep = cachep;
 	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
 
@@ -2946,102 +2936,26 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
 }
 
 /**
- * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
+ * memcg_kmem_get_cache: select memcg or root cache for allocation
  * @cachep: the original global kmem cache
  *
 * Return the kmem_cache we're supposed to use for a slab allocation.
- * We try to use the current memcg's version of the cache.
 *
 * If the cache does not exist yet, if we are the first user of it, we
 * create it asynchronously in a workqueue and let the current allocation
 * go through with the original cache.
- *
- * This function takes a reference to the cache it returns to assure it
- * won't get destroyed while we are working with it. Once the caller is
- * done with it, memcg_kmem_put_cache() must be called to release the
- * reference.
 */
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep,
-					struct obj_cgroup **objcgp)
+struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
 {
-	struct mem_cgroup *memcg;
 	struct kmem_cache *memcg_cachep;
-	struct memcg_cache_array *arr;
-	int kmemcg_id;
 
-	VM_BUG_ON(!is_root_cache(cachep));
-
-	if (memcg_kmem_bypass())
+	memcg_cachep = READ_ONCE(cachep->memcg_params.memcg_cache);
+	if (unlikely(!memcg_cachep)) {
+		memcg_schedule_kmem_cache_create(cachep);
 		return cachep;
+	}
 
-	rcu_read_lock();
-
-	if (unlikely(current->active_memcg))
-		memcg = current->active_memcg;
-	else
-		memcg = mem_cgroup_from_task(current);
-
-	if (!memcg || memcg == root_mem_cgroup)
-		goto out_unlock;
-
-	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
-	if (kmemcg_id < 0)
-		goto out_unlock;
-
-	arr = rcu_dereference(cachep->memcg_params.memcg_caches);
-
-	/*
-	 * Make sure we will access the up-to-date value. The code updating
-	 * memcg_caches issues a write barrier to match the data dependency
-	 * barrier inside READ_ONCE() (see memcg_create_kmem_cache()).
-	 */
-	memcg_cachep = READ_ONCE(arr->entries[kmemcg_id]);
-
-	/*
-	 * If we are in a safe context (can wait, and not in interrupt
-	 * context), we could be be predictable and return right away.
-	 * This would guarantee that the allocation being performed
-	 * already belongs in the new cache.
-	 *
-	 * However, there are some clashes that can arrive from locking.
-	 * For instance, because we acquire the slab_mutex while doing
-	 * memcg_create_kmem_cache, this means no further allocation
-	 * could happen with the slab_mutex held. So it's better to
-	 * defer everything.
-	 *
-	 * If the memcg is dying or memcg_cache is about to be released,
-	 * don't bother creating new kmem_caches. Because memcg_cachep
-	 * is ZEROed as the fist step of kmem offlining, we don't need
-	 * percpu_ref_tryget_live() here. css_tryget_online() check in
-	 * memcg_schedule_kmem_cache_create() will prevent us from
-	 * creation of a new kmem_cache.
-	 */
-	if (unlikely(!memcg_cachep))
-		memcg_schedule_kmem_cache_create(memcg, cachep);
-	else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt)) {
-		struct obj_cgroup *objcg = rcu_dereference(memcg->objcg);
-
-		if (!objcg || !obj_cgroup_tryget(objcg)) {
-			percpu_ref_put(&memcg_cachep->memcg_params.refcnt);
-			goto out_unlock;
-		}
-
-		*objcgp = objcg;
-		cachep = memcg_cachep;
-	}
-out_unlock:
-	rcu_read_unlock();
-	return cachep;
-}
+	return memcg_cachep;
+}
 
-/**
- * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
- * @cachep: the cache returned by memcg_kmem_get_cache
- */
-void memcg_kmem_put_cache(struct kmem_cache *cachep)
-{
-	if (!is_root_cache(cachep))
-		percpu_ref_put(&cachep->memcg_params.refcnt);
-}
-
 /**
@@ -3731,7 +3645,6 @@ static int memcg_online_kmem(struct mem_cgroup *memcg)
 	 */
 	memcg->kmemcg_id = memcg_id;
 	memcg->kmem_state = KMEM_ONLINE;
-	INIT_LIST_HEAD(&memcg->kmem_caches);
 
 	return 0;
 }
@@ -3744,22 +3657,13 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
 
 	if (memcg->kmem_state != KMEM_ONLINE)
 		return;
-	/*
-	 * Clear the online state before clearing memcg_caches array
-	 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
-	 * guarantees that no cache will be created for this cgroup
-	 * after we are done (see memcg_create_kmem_cache()).
-	 */
+
 	memcg->kmem_state = KMEM_ALLOCATED;
 
 	parent = parent_mem_cgroup(memcg);
 	if (!parent)
 		parent = root_mem_cgroup;
 
-	/*
-	 * Deactivate and reparent kmem_caches and objcgs.
-	 */
-	memcg_deactivate_kmem_caches(memcg, parent);
 	memcg_reparent_objcgs(memcg, parent);
 
 	kmemcg_id = memcg->kmemcg_id;
@@ -5384,9 +5288,6 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 
 	/* The following stuff does not apply to the root */
 	if (!parent) {
-#ifdef CONFIG_MEMCG_KMEM
-		INIT_LIST_HEAD(&memcg->kmem_caches);
-#endif
 		root_mem_cgroup = memcg;
 		return &memcg->css;
 	}
mm/slab.c (16 lines changed)

@@ -1249,7 +1249,7 @@ void __init kmem_cache_init(void)
 				  nr_node_ids * sizeof(struct kmem_cache_node *),
 				  SLAB_HWCACHE_ALIGN, 0, 0);
 	list_add(&kmem_cache->list, &slab_caches);
-	memcg_link_cache(kmem_cache, NULL);
+	memcg_link_cache(kmem_cache);
 	slab_state = PARTIAL;
 
 	/*
@@ -2253,17 +2253,6 @@ int __kmem_cache_shrink(struct kmem_cache *cachep)
 	return (ret ? 1 : 0);
 }
 
-#ifdef CONFIG_MEMCG
-void __kmemcg_cache_deactivate(struct kmem_cache *cachep)
-{
-	__kmem_cache_shrink(cachep);
-}
-
-void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
-{
-}
-#endif
-
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
 	return __kmem_cache_shrink(cachep);
@@ -3872,7 +3861,8 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 		return ret;
 
 	lockdep_assert_held(&slab_mutex);
-	for_each_memcg_cache(c, cachep) {
+	c = memcg_cache(cachep);
+	if (c) {
 		/* return value determined by the root cache only */
 		__do_tune_cpucache(c, limit, batchcount, shared, gfp);
 	}
mm/slab.h (144 lines changed)

@@ -32,66 +32,25 @@ struct kmem_cache {
 
 #else /* !CONFIG_SLOB */
 
-struct memcg_cache_array {
-	struct rcu_head rcu;
-	struct kmem_cache *entries[0];
-};
-
 /*
  * This is the main placeholder for memcg-related information in kmem caches.
- * Both the root cache and the child caches will have it. For the root cache,
- * this will hold a dynamically allocated array large enough to hold
- * information about the currently limited memcgs in the system. To allow the
- * array to be accessed without taking any locks, on relocation we free the old
- * version only after a grace period.
- *
- * Root and child caches hold different metadata.
+ * Both the root cache and the child cache will have it. Some fields are used
+ * in both cases, other are specific to root caches.
 *
 * @root_cache:	Common to root and child caches.  NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
- * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
- *		used to index child cachces during allocation and cleared
- *		early during shutdown.
- *
- * @root_caches_node: List node for slab_root_caches list.
- *
- * @children:	List of all child caches.  While the child caches are also
- *		reachable through @memcg_caches, a child cache remains on
- *		this list until it is actually destroyed.
- *
- * The following fields are specific to child caches.
- *
- * @memcg:	Pointer to the memcg this cache belongs to.
- *
- * @children_node: List node for @root_cache->children list.
- *
- * @kmem_caches_node: List node for @memcg->kmem_caches list.
+ * @memcg_cache: pointer to memcg kmem cache, used by all non-root memory
+ *		cgroups.
+ * @root_caches_node: list node for slab_root_caches list.
 */
 struct memcg_cache_params {
 	struct kmem_cache *root_cache;
-	union {
-		struct {
-			struct memcg_cache_array __rcu *memcg_caches;
-			struct list_head __root_caches_node;
-			struct list_head children;
-			bool dying;
-		};
-		struct {
-			struct mem_cgroup *memcg;
-			struct list_head children_node;
-			struct list_head kmem_caches_node;
-			struct percpu_ref refcnt;
 
-			void (*work_fn)(struct kmem_cache *);
-			union {
-				struct rcu_head rcu_head;
-				struct work_struct work;
-			};
-		};
-	};
+	struct kmem_cache *memcg_cache;
+	struct list_head __root_caches_node;
 };
 #endif /* CONFIG_SLOB */
 
@@ -236,8 +195,6 @@ bool __kmem_cache_empty(struct kmem_cache *);
 int __kmem_cache_shutdown(struct kmem_cache *);
 void __kmem_cache_release(struct kmem_cache *);
 int __kmem_cache_shrink(struct kmem_cache *);
-void __kmemcg_cache_deactivate(struct kmem_cache *s);
-void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
 void slab_kmem_cache_release(struct kmem_cache *);
 void kmem_cache_shrink_all(struct kmem_cache *s);
 
@@ -311,14 +268,6 @@ static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t fla
 extern struct list_head		slab_root_caches;
 #define root_caches_node	memcg_params.__root_caches_node
 
-/*
- * Iterate over all memcg caches of the given root cache. The caller must hold
- * slab_mutex.
- */
-#define for_each_memcg_cache(iter, root) \
-	list_for_each_entry(iter, &(root)->memcg_params.children, \
-			    memcg_params.children_node)
-
 static inline bool is_root_cache(struct kmem_cache *s)
 {
 	return !s->memcg_params.root_cache;
@@ -349,6 +298,13 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
 	return s->memcg_params.root_cache;
 }
 
+static inline struct kmem_cache *memcg_cache(struct kmem_cache *s)
+{
+	if (is_root_cache(s))
+		return s->memcg_params.memcg_cache;
+	return NULL;
+}
+
 static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
 {
 	/*
@@ -361,25 +317,9 @@ static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
 		((unsigned long)page->obj_cgroups & ~0x1UL);
 }
 
-/*
- * Expects a pointer to a slab page. Please note, that PageSlab() check
- * isn't sufficient, as it returns true also for tail compound slab pages,
- * which do not have slab_cache pointer set.
- * So this function assumes that the page can pass PageSlab() && !PageTail()
- * check.
- *
- * The kmem_cache can be reparented asynchronously. The caller must ensure
- * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
- */
-static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
+static inline bool page_has_obj_cgroups(struct page *page)
 {
-	struct kmem_cache *s;
-
-	s = READ_ONCE(page->slab_cache);
-	if (s && !is_root_cache(s))
-		return READ_ONCE(s->memcg_params.memcg);
-
-	return NULL;
+	return ((unsigned long)page->obj_cgroups & 0x1UL);
 }
 
 static inline int memcg_alloc_page_obj_cgroups(struct page *page,
@@ -418,17 +358,25 @@ static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
 						size_t objects, gfp_t flags)
 {
 	struct kmem_cache *cachep;
+	struct obj_cgroup *objcg;
 
-	cachep = memcg_kmem_get_cache(s, objcgp);
+	if (memcg_kmem_bypass())
+		return s;
+
+	cachep = memcg_kmem_get_cache(s);
 	if (is_root_cache(cachep))
 		return s;
 
-	if (obj_cgroup_charge(*objcgp, flags, objects * obj_full_size(s))) {
-		obj_cgroup_put(*objcgp);
-		memcg_kmem_put_cache(cachep);
+	objcg = get_obj_cgroup_from_current();
+	if (!objcg)
+		return s;
+
+	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
+		obj_cgroup_put(objcg);
 		cachep = NULL;
 	}
 
+	*objcgp = objcg;
 	return cachep;
 }
 
@@ -467,7 +415,6 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
 		}
 	}
 	obj_cgroup_put(objcg);
-	memcg_kmem_put_cache(s);
 }
 
 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
@@ -491,7 +438,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
 }
 
 extern void slab_init_memcg_params(struct kmem_cache *);
-extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);
+extern void memcg_link_cache(struct kmem_cache *s);
 
 #else /* CONFIG_MEMCG_KMEM */
 
@@ -499,9 +446,6 @@ extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);
 #define slab_root_caches	slab_caches
 #define root_caches_node	list
 
-#define for_each_memcg_cache(iter, root) \
-	for ((void)(iter), (void)(root); 0; )
-
 static inline bool is_root_cache(struct kmem_cache *s)
 {
 	return true;
@@ -523,7 +467,17 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
 	return s;
 }
 
-static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
+static inline struct kmem_cache *memcg_cache(struct kmem_cache *s)
+{
+	return NULL;
+}
+
+static inline bool page_has_obj_cgroups(struct page *page)
+{
+	return false;
+}
+
+static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
 {
 	return NULL;
 }
@@ -560,8 +514,7 @@ static inline void slab_init_memcg_params(struct kmem_cache *s)
 {
 }
 
-static inline void memcg_link_cache(struct kmem_cache *s,
-				    struct mem_cgroup *memcg)
+static inline void memcg_link_cache(struct kmem_cache *s)
 {
 }
 
@@ -582,17 +535,14 @@ static __always_inline int charge_slab_page(struct page *page,
 					    gfp_t gfp, int order,
 					    struct kmem_cache *s)
 {
-#ifdef CONFIG_MEMCG_KMEM
 	if (memcg_kmem_enabled() && !is_root_cache(s)) {
 		int ret;
 
 		ret = memcg_alloc_page_obj_cgroups(page, s, gfp);
 		if (ret)
 			return ret;
-
-		percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
 	}
-#endif
+
 	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
 			    PAGE_SIZE << order);
 	return 0;
@@ -601,12 +551,9 @@ static __always_inline int charge_slab_page(struct page *page,
 static __always_inline void uncharge_slab_page(struct page *page, int order,
 					       struct kmem_cache *s)
 {
-#ifdef CONFIG_MEMCG_KMEM
-	if (memcg_kmem_enabled() && !is_root_cache(s)) {
+	if (memcg_kmem_enabled() && !is_root_cache(s))
 		memcg_free_page_obj_cgroups(page);
-		percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
-	}
-#endif
+
 	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
 			    -(PAGE_SIZE << order));
 }
@@ -749,9 +696,6 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
 void *slab_start(struct seq_file *m, loff_t *pos);
 void *slab_next(struct seq_file *m, void *p, loff_t *pos);
 void slab_stop(struct seq_file *m, void *p);
-void *memcg_slab_start(struct seq_file *m, loff_t *pos);
-void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
-void memcg_slab_stop(struct seq_file *m, void *p);
 int memcg_slab_show(struct seq_file *m, void *p);
 
 #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
										445
									
								
								mm/slab_common.c
									
									
									
									
									
								
							
							
						
						
									
										445
									
								
								mm/slab_common.c
									
									
									
									
									
								
							| 
						 | 
					@ -133,141 +133,36 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 | 
				
			||||||
#ifdef CONFIG_MEMCG_KMEM
 | 
					#ifdef CONFIG_MEMCG_KMEM
 | 
				
			||||||
 | 
					
 | 
				
			||||||
LIST_HEAD(slab_root_caches);
 | 
					LIST_HEAD(slab_root_caches);
 | 
				
			||||||
static DEFINE_SPINLOCK(memcg_kmem_wq_lock);
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref);
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
void slab_init_memcg_params(struct kmem_cache *s)
 | 
					void slab_init_memcg_params(struct kmem_cache *s)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	s->memcg_params.root_cache = NULL;
 | 
						s->memcg_params.root_cache = NULL;
 | 
				
			||||||
	RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
 | 
						s->memcg_params.memcg_cache = NULL;
 | 
				
			||||||
	INIT_LIST_HEAD(&s->memcg_params.children);
 | 
					 | 
				
			||||||
	s->memcg_params.dying = false;
 | 
					 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static int init_memcg_params(struct kmem_cache *s,
 | 
					static void init_memcg_params(struct kmem_cache *s,
 | 
				
			||||||
			      struct kmem_cache *root_cache)
 | 
								      struct kmem_cache *root_cache)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	struct memcg_cache_array *arr;
 | 
						if (root_cache)
 | 
				
			||||||
 | 
					 | 
				
			||||||
	if (root_cache) {
 | 
					 | 
				
			||||||
		int ret = percpu_ref_init(&s->memcg_params.refcnt,
 | 
					 | 
				
			||||||
					  kmemcg_cache_shutdown,
 | 
					 | 
				
			||||||
					  0, GFP_KERNEL);
 | 
					 | 
				
			||||||
		if (ret)
 | 
					 | 
				
			||||||
			return ret;
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
		s->memcg_params.root_cache = root_cache;
 | 
							s->memcg_params.root_cache = root_cache;
 | 
				
			||||||
		INIT_LIST_HEAD(&s->memcg_params.children_node);
 | 
						else
 | 
				
			||||||
		INIT_LIST_HEAD(&s->memcg_params.kmem_caches_node);
 | 
					 | 
				
			||||||
		return 0;
 | 
					 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
		slab_init_memcg_params(s);
 | 
							slab_init_memcg_params(s);
 | 
				
			||||||
 | 
					 | 
				
			||||||
	if (!memcg_nr_cache_ids)
 | 
					 | 
				
			||||||
		return 0;
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	arr = kvzalloc(sizeof(struct memcg_cache_array) +
 | 
					 | 
				
			||||||
		       memcg_nr_cache_ids * sizeof(void *),
 | 
					 | 
				
			||||||
		       GFP_KERNEL);
 | 
					 | 
				
			||||||
	if (!arr)
 | 
					 | 
				
			||||||
		return -ENOMEM;
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
 | 
					 | 
				
			||||||
	return 0;
 | 
					 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static void destroy_memcg_params(struct kmem_cache *s)
 | 
					void memcg_link_cache(struct kmem_cache *s)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	if (is_root_cache(s)) {
 | 
						if (is_root_cache(s))
 | 
				
			||||||
		kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
 | 
					 | 
				
			||||||
	} else {
 | 
					 | 
				
			||||||
		mem_cgroup_put(s->memcg_params.memcg);
 | 
					 | 
				
			||||||
		WRITE_ONCE(s->memcg_params.memcg, NULL);
 | 
					 | 
				
			||||||
		percpu_ref_exit(&s->memcg_params.refcnt);
 | 
					 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
}
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
static void free_memcg_params(struct rcu_head *rcu)
 | 
					 | 
				
			||||||
{
 | 
					 | 
				
			||||||
	struct memcg_cache_array *old;
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	old = container_of(rcu, struct memcg_cache_array, rcu);
 | 
					 | 
				
			||||||
	kvfree(old);
 | 
					 | 
				
			||||||
}
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
static int update_memcg_params(struct kmem_cache *s, int new_array_size)
 | 
					 | 
				
			||||||
{
 | 
					 | 
				
			||||||
	struct memcg_cache_array *old, *new;
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	new = kvzalloc(sizeof(struct memcg_cache_array) +
 | 
					 | 
				
			||||||
		       new_array_size * sizeof(void *), GFP_KERNEL);
 | 
					 | 
				
			||||||
	if (!new)
 | 
					 | 
				
			||||||
		return -ENOMEM;
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	old = rcu_dereference_protected(s->memcg_params.memcg_caches,
 | 
					 | 
				
			||||||
					lockdep_is_held(&slab_mutex));
 | 
					 | 
				
			||||||
	if (old)
 | 
					 | 
				
			||||||
		memcpy(new->entries, old->entries,
 | 
					 | 
				
			||||||
		       memcg_nr_cache_ids * sizeof(void *));
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	rcu_assign_pointer(s->memcg_params.memcg_caches, new);
 | 
					 | 
				
			||||||
	if (old)
 | 
					 | 
				
			||||||
		call_rcu(&old->rcu, free_memcg_params);
 | 
					 | 
				
			||||||
	return 0;
 | 
					 | 
				
			||||||
}
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
int memcg_update_all_caches(int num_memcgs)
 | 
					 | 
				
			||||||
{
 | 
					 | 
				
			||||||
	struct kmem_cache *s;
 | 
					 | 
				
			||||||
	int ret = 0;
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	mutex_lock(&slab_mutex);
 | 
					 | 
				
			||||||
	list_for_each_entry(s, &slab_root_caches, root_caches_node) {
 | 
					 | 
				
			||||||
		ret = update_memcg_params(s, num_memcgs);
 | 
					 | 
				
			||||||
		/*
 | 
					 | 
				
			||||||
		 * Instead of freeing the memory, we'll just leave the caches
 | 
					 | 
				
			||||||
		 * up to this point in an updated state.
 | 
					 | 
				
			||||||
		 */
 | 
					 | 
				
			||||||
		if (ret)
 | 
					 | 
				
			||||||
			break;
 | 
					 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
	mutex_unlock(&slab_mutex);
 | 
					 | 
				
			||||||
	return ret;
 | 
					 | 
				
			||||||
}
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg)
 | 
					 | 
				
			||||||
{
 | 
					 | 
				
			||||||
	if (is_root_cache(s)) {
 | 
					 | 
				
			||||||
		list_add(&s->root_caches_node, &slab_root_caches);
 | 
							list_add(&s->root_caches_node, &slab_root_caches);
 | 
				
			||||||
	} else {
 | 
					 | 
				
			||||||
		css_get(&memcg->css);
 | 
					 | 
				
			||||||
		s->memcg_params.memcg = memcg;
 | 
					 | 
				
			||||||
		list_add(&s->memcg_params.children_node,
 | 
					 | 
				
			||||||
			 &s->memcg_params.root_cache->memcg_params.children);
 | 
					 | 
				
			||||||
		list_add(&s->memcg_params.kmem_caches_node,
 | 
					 | 
				
			||||||
			 &s->memcg_params.memcg->kmem_caches);
 | 
					 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static void memcg_unlink_cache(struct kmem_cache *s)
 | 
					static void memcg_unlink_cache(struct kmem_cache *s)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	if (is_root_cache(s)) {
 | 
						if (is_root_cache(s))
 | 
				
			||||||
		list_del(&s->root_caches_node);
 | 
							list_del(&s->root_caches_node);
 | 
				
			||||||
	} else {
 | 
					 | 
				
			||||||
		list_del(&s->memcg_params.children_node);
 | 
					 | 
				
			||||||
		list_del(&s->memcg_params.kmem_caches_node);
 | 
					 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
#else
 | 
					#else
 | 
				
			||||||
static inline int init_memcg_params(struct kmem_cache *s,
 | 
					static inline void init_memcg_params(struct kmem_cache *s,
 | 
				
			||||||
				     struct kmem_cache *root_cache)
 | 
									     struct kmem_cache *root_cache)
 | 
				
			||||||
{
 | 
					 | 
				
			||||||
	return 0;
 | 
					 | 
				
			||||||
}
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
static inline void destroy_memcg_params(struct kmem_cache *s)
 | 
					 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -328,14 +223,6 @@ int slab_unmergeable(struct kmem_cache *s)
 | 
				
			||||||
	if (s->refcount < 0)
 | 
						if (s->refcount < 0)
 | 
				
			||||||
		return 1;
 | 
							return 1;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
#ifdef CONFIG_MEMCG_KMEM
 | 
					 | 
				
			||||||
	/*
 | 
					 | 
				
			||||||
	 * Skip the dying kmem_cache.
 | 
					 | 
				
			||||||
	 */
 | 
					 | 
				
			||||||
	if (s->memcg_params.dying)
 | 
					 | 
				
			||||||
		return 1;
 | 
					 | 
				
			||||||
#endif
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	return 0;
 | 
						return 0;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -390,7 +277,7 @@ static struct kmem_cache *create_cache(const char *name,
 | 
				
			||||||
		unsigned int object_size, unsigned int align,
 | 
							unsigned int object_size, unsigned int align,
 | 
				
			||||||
		slab_flags_t flags, unsigned int useroffset,
 | 
							slab_flags_t flags, unsigned int useroffset,
 | 
				
			||||||
		unsigned int usersize, void (*ctor)(void *),
 | 
							unsigned int usersize, void (*ctor)(void *),
 | 
				
			||||||
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
 | 
							struct kmem_cache *root_cache)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	struct kmem_cache *s;
 | 
						struct kmem_cache *s;
 | 
				
			||||||
	int err;
 | 
						int err;
 | 
				
			||||||
| 
						 | 
					@ -410,24 +297,20 @@ static struct kmem_cache *create_cache(const char *name,
 | 
				
			||||||
	s->useroffset = useroffset;
 | 
						s->useroffset = useroffset;
 | 
				
			||||||
	s->usersize = usersize;
 | 
						s->usersize = usersize;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	err = init_memcg_params(s, root_cache);
 | 
						init_memcg_params(s, root_cache);
 | 
				
			||||||
	if (err)
 | 
					 | 
				
			||||||
		goto out_free_cache;
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	err = __kmem_cache_create(s, flags);
 | 
						err = __kmem_cache_create(s, flags);
 | 
				
			||||||
	if (err)
 | 
						if (err)
 | 
				
			||||||
		goto out_free_cache;
 | 
							goto out_free_cache;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	s->refcount = 1;
 | 
						s->refcount = 1;
 | 
				
			||||||
	list_add(&s->list, &slab_caches);
 | 
						list_add(&s->list, &slab_caches);
 | 
				
			||||||
	memcg_link_cache(s, memcg);
 | 
						memcg_link_cache(s);
 | 
				
			||||||
out:
 | 
					out:
 | 
				
			||||||
	if (err)
 | 
						if (err)
 | 
				
			||||||
		return ERR_PTR(err);
 | 
							return ERR_PTR(err);
 | 
				
			||||||
	return s;
 | 
						return s;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
out_free_cache:
 | 
					out_free_cache:
 | 
				
			||||||
	destroy_memcg_params(s);
 | 
					 | 
				
			||||||
	kmem_cache_free(kmem_cache, s);
 | 
						kmem_cache_free(kmem_cache, s);
 | 
				
			||||||
	goto out;
 | 
						goto out;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
| 
						 | 
					@ -514,7 +397,7 @@ kmem_cache_create_usercopy(const char *name,
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	s = create_cache(cache_name, size,
 | 
						s = create_cache(cache_name, size,
 | 
				
			||||||
			 calculate_alignment(flags, align, size),
 | 
								 calculate_alignment(flags, align, size),
 | 
				
			||||||
			 flags, useroffset, usersize, ctor, NULL, NULL);
 | 
								 flags, useroffset, usersize, ctor, NULL);
 | 
				
			||||||
	if (IS_ERR(s)) {
 | 
						if (IS_ERR(s)) {
 | 
				
			||||||
		err = PTR_ERR(s);
 | 
							err = PTR_ERR(s);
 | 
				
			||||||
		kfree_const(cache_name);
 | 
							kfree_const(cache_name);
 | 
				
			||||||
@@ -639,51 +522,27 @@ static int shutdown_cache(struct kmem_cache *s)
 
 #ifdef CONFIG_MEMCG_KMEM
 /*
- * memcg_create_kmem_cache - Create a cache for a memory cgroup.
- * @memcg: The memory cgroup the new cache is for.
+ * memcg_create_kmem_cache - Create a cache for non-root memory cgroups.
  * @root_cache: The parent of the new cache.
  *
  * This function attempts to create a kmem cache that will serve allocation
- * requests going from @memcg to @root_cache. The new cache inherits properties
- * from its parent.
+ * requests going all non-root memory cgroups to @root_cache. The new cache
+ * inherits properties from its parent.
  */
-void memcg_create_kmem_cache(struct mem_cgroup *memcg,
-			     struct kmem_cache *root_cache)
+void memcg_create_kmem_cache(struct kmem_cache *root_cache)
 {
-	static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
-	struct cgroup_subsys_state *css = &memcg->css;
-	struct memcg_cache_array *arr;
 	struct kmem_cache *s = NULL;
 	char *cache_name;
-	int idx;
 
 	get_online_cpus();
 	get_online_mems();
 
 	mutex_lock(&slab_mutex);
 
-	/*
-	 * The memory cgroup could have been offlined while the cache
-	 * creation work was pending.
-	 */
-	if (memcg->kmem_state != KMEM_ONLINE)
+	if (root_cache->memcg_params.memcg_cache)
 		goto out_unlock;
 
-	idx = memcg_cache_id(memcg);
-	arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
-					lockdep_is_held(&slab_mutex));
-
-	/*
-	 * Since per-memcg caches are created asynchronously on first
-	 * allocation (see memcg_kmem_get_cache()), several threads can try to
-	 * create the same cache, but only one of them may succeed.
-	 */
-	if (arr->entries[idx])
-		goto out_unlock;
-
-	cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
-	cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
-			       css->serial_nr, memcg_name_buf);
+	cache_name = kasprintf(GFP_KERNEL, "%s-memcg", root_cache->name);
 	if (!cache_name)
 		goto out_unlock;
 
@@ -691,7 +550,7 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
 			 root_cache->align,
 			 root_cache->flags & CACHE_CREATE_MASK,
 			 root_cache->useroffset, root_cache->usersize,
-			 root_cache->ctor, memcg, root_cache);
+			 root_cache->ctor, root_cache);
 	/*
 	 * If we could not create a memcg cache, do not complain, because
 	 * that's not critical at all as we can always proceed with the root
@@ -708,7 +567,7 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
 	 * initialized.
 	 */
 	smp_wmb();
-	arr->entries[idx] = s;
+	root_cache->memcg_params.memcg_cache = s;
 
 out_unlock:
 	mutex_unlock(&slab_mutex);
@@ -717,200 +576,18 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
 	put_online_cpus();
 }
 
-static void kmemcg_workfn(struct work_struct *work)
-{
-	struct kmem_cache *s = container_of(work, struct kmem_cache,
-					    memcg_params.work);
-
-	get_online_cpus();
-	get_online_mems();
-
-	mutex_lock(&slab_mutex);
-	s->memcg_params.work_fn(s);
-	mutex_unlock(&slab_mutex);
-
-	put_online_mems();
-	put_online_cpus();
-}
-
-static void kmemcg_rcufn(struct rcu_head *head)
-{
-	struct kmem_cache *s = container_of(head, struct kmem_cache,
-					    memcg_params.rcu_head);
-
-	/*
-	 * We need to grab blocking locks.  Bounce to ->work.  The
-	 * work item shares the space with the RCU head and can't be
-	 * initialized earlier.
-	 */
-	INIT_WORK(&s->memcg_params.work, kmemcg_workfn);
-	queue_work(memcg_kmem_cache_wq, &s->memcg_params.work);
-}
-
-static void kmemcg_cache_shutdown_fn(struct kmem_cache *s)
-{
-	WARN_ON(shutdown_cache(s));
-}
-
-static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref)
-{
-	struct kmem_cache *s = container_of(percpu_ref, struct kmem_cache,
-					    memcg_params.refcnt);
-	unsigned long flags;
-
-	spin_lock_irqsave(&memcg_kmem_wq_lock, flags);
-	if (s->memcg_params.root_cache->memcg_params.dying)
-		goto unlock;
-
-	s->memcg_params.work_fn = kmemcg_cache_shutdown_fn;
-	INIT_WORK(&s->memcg_params.work, kmemcg_workfn);
-	queue_work(memcg_kmem_cache_wq, &s->memcg_params.work);
-
-unlock:
-	spin_unlock_irqrestore(&memcg_kmem_wq_lock, flags);
-}
-
-static void kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
-{
-	__kmemcg_cache_deactivate_after_rcu(s);
-	percpu_ref_kill(&s->memcg_params.refcnt);
-}
-
-static void kmemcg_cache_deactivate(struct kmem_cache *s)
-{
-	if (WARN_ON_ONCE(is_root_cache(s)))
-		return;
-
-	__kmemcg_cache_deactivate(s);
-	s->flags |= SLAB_DEACTIVATED;
-
-	/*
-	 * memcg_kmem_wq_lock is used to synchronize memcg_params.dying
-	 * flag and make sure that no new kmem_cache deactivation tasks
-	 * are queued (see flush_memcg_workqueue() ).
-	 */
-	spin_lock_irq(&memcg_kmem_wq_lock);
-	if (s->memcg_params.root_cache->memcg_params.dying)
-		goto unlock;
-
-	s->memcg_params.work_fn = kmemcg_cache_deactivate_after_rcu;
-	call_rcu(&s->memcg_params.rcu_head, kmemcg_rcufn);
-unlock:
-	spin_unlock_irq(&memcg_kmem_wq_lock);
-}
-
-void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg,
-				  struct mem_cgroup *parent)
-{
-	int idx;
-	struct memcg_cache_array *arr;
-	struct kmem_cache *s, *c;
-	unsigned int nr_reparented;
-
-	idx = memcg_cache_id(memcg);
-
-	get_online_cpus();
-	get_online_mems();
-
-	mutex_lock(&slab_mutex);
-	list_for_each_entry(s, &slab_root_caches, root_caches_node) {
-		arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
-						lockdep_is_held(&slab_mutex));
-		c = arr->entries[idx];
-		if (!c)
-			continue;
-
-		kmemcg_cache_deactivate(c);
-		arr->entries[idx] = NULL;
-	}
-	nr_reparented = 0;
-	list_for_each_entry(s, &memcg->kmem_caches,
-			    memcg_params.kmem_caches_node) {
-		WRITE_ONCE(s->memcg_params.memcg, parent);
-		css_put(&memcg->css);
-		nr_reparented++;
-	}
-	if (nr_reparented) {
-		list_splice_init(&memcg->kmem_caches,
-				 &parent->kmem_caches);
-		css_get_many(&parent->css, nr_reparented);
-	}
-	mutex_unlock(&slab_mutex);
-
-	put_online_mems();
-	put_online_cpus();
-}
-
 static int shutdown_memcg_caches(struct kmem_cache *s)
 {
-	struct memcg_cache_array *arr;
-	struct kmem_cache *c, *c2;
-	LIST_HEAD(busy);
-	int i;
-
 	BUG_ON(!is_root_cache(s));
 
-	/*
-	 * First, shutdown active caches, i.e. caches that belong to online
-	 * memory cgroups.
-	 */
-	arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
-					lockdep_is_held(&slab_mutex));
-	for_each_memcg_cache_index(i) {
-		c = arr->entries[i];
-		if (!c)
-			continue;
-		if (shutdown_cache(c))
-			/*
-			 * The cache still has objects. Move it to a temporary
-			 * list so as not to try to destroy it for a second
-			 * time while iterating over inactive caches below.
-			 */
-			list_move(&c->memcg_params.children_node, &busy);
-		else
-			/*
-			 * The cache is empty and will be destroyed soon. Clear
-			 * the pointer to it in the memcg_caches array so that
-			 * it will never be accessed even if the root cache
-			 * stays alive.
-			 */
-			arr->entries[i] = NULL;
-	}
+	if (s->memcg_params.memcg_cache)
+		WARN_ON(shutdown_cache(s->memcg_params.memcg_cache));
 
-	/*
-	 * Second, shutdown all caches left from memory cgroups that are now
-	 * offline.
-	 */
-	list_for_each_entry_safe(c, c2, &s->memcg_params.children,
-				 memcg_params.children_node)
-		shutdown_cache(c);
-
-	list_splice(&busy, &s->memcg_params.children);
-
-	/*
-	 * A cache being destroyed must be empty. In particular, this means
-	 * that all per memcg caches attached to it must be empty too.
-	 */
-	if (!list_empty(&s->memcg_params.children))
-		return -EBUSY;
 	return 0;
 }
 
-static void memcg_set_kmem_cache_dying(struct kmem_cache *s)
-{
-	spin_lock_irq(&memcg_kmem_wq_lock);
-	s->memcg_params.dying = true;
-	spin_unlock_irq(&memcg_kmem_wq_lock);
-}
-
 static void flush_memcg_workqueue(struct kmem_cache *s)
 {
-	/*
-	 * SLAB and SLUB deactivate the kmem_caches through call_rcu. Make
-	 * sure all registered rcu callbacks have been invoked.
-	 */
-	rcu_barrier();
-
 	/*
 	 * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB
 	 * deactivates the memcg kmem_caches through workqueue. Make sure all
@@ -918,30 +595,21 @@ static void flush_memcg_workqueue(struct kmem_cache *s)
 	 */
 	if (likely(memcg_kmem_cache_wq))
 		flush_workqueue(memcg_kmem_cache_wq);
-
-	/*
-	 * If we're racing with children kmem_cache deactivation, it might
-	 * take another rcu grace period to complete their destruction.
-	 * At this moment the corresponding percpu_ref_kill() call should be
-	 * done, but it might take another rcu grace period to complete
-	 * switching to the atomic mode.
-	 * Please, note that we check without grabbing the slab_mutex. It's safe
-	 * because at this moment the children list can't grow.
-	 */
-	if (!list_empty(&s->memcg_params.children))
-		rcu_barrier();
 }
 #else
 static inline int shutdown_memcg_caches(struct kmem_cache *s)
 {
 	return 0;
 }
 
+static inline void flush_memcg_workqueue(struct kmem_cache *s)
+{
+}
 #endif /* CONFIG_MEMCG_KMEM */
 
 void slab_kmem_cache_release(struct kmem_cache *s)
 {
 	__kmem_cache_release(s);
-	destroy_memcg_params(s);
 	kfree_const(s->name);
 	kmem_cache_free(kmem_cache, s);
 }
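
The hunks above replace the per-memcg array of child caches with a single, lazily created companion cache per root cache, named "<root cache name>-memcg" and stored in memcg_params.memcg_cache. A small userspace toy of that arrangement, for illustration only (the struct and helper names below are invented and are not kernel API):

#include <stdio.h>
#include <stdlib.h>

struct toy_cache {
	char name[32];
	struct toy_cache *memcg_cache;	/* single companion, NULL until created */
};

/* Rough analogue of memcg_create_kmem_cache(): create "<name>-memcg" once,
 * on first demand, and reuse it afterwards. */
static struct toy_cache *toy_get_memcg_cache(struct toy_cache *root)
{
	if (!root->memcg_cache) {
		struct toy_cache *c = calloc(1, sizeof(*c));

		if (!c)
			return root;	/* fall back to the root cache */
		snprintf(c->name, sizeof(c->name), "%s-memcg", root->name);
		root->memcg_cache = c;
	}
	return root->memcg_cache;
}

int main(void)
{
	struct toy_cache dentry = { .name = "dentry" };

	printf("%s\n", toy_get_memcg_cache(&dentry)->name);	/* dentry-memcg */
	printf("%s\n", toy_get_memcg_cache(&dentry)->name);	/* same cache again */
	free(dentry.memcg_cache);
	return 0;
}

Because the companion exists at most once and is never torn down ahead of its root cache, the new shutdown_memcg_caches() above only has to try shutdown_cache() on that one pointer and warn if it fails.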
@@ -953,6 +621,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	if (unlikely(!s))
 		return;
 
+	flush_memcg_workqueue(s);
+
 	get_online_cpus();
 	get_online_mems();
 
@@ -962,22 +632,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	if (s->refcount)
 		goto out_unlock;
 
-#ifdef CONFIG_MEMCG_KMEM
-	memcg_set_kmem_cache_dying(s);
-
-	mutex_unlock(&slab_mutex);
-
-	put_online_mems();
-	put_online_cpus();
-
-	flush_memcg_workqueue(s);
-
-	get_online_cpus();
-	get_online_mems();
-
-	mutex_lock(&slab_mutex);
-#endif
-
 	err = shutdown_memcg_caches(s);
 	if (!err)
 		err = shutdown_cache(s);
@@ -1019,7 +673,7 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 EXPORT_SYMBOL(kmem_cache_shrink);
 
 /**
- * kmem_cache_shrink_all - shrink a cache and all memcg caches for root cache
+ * kmem_cache_shrink_all - shrink root and memcg caches
  * @s: The cache pointer
  */
 void kmem_cache_shrink_all(struct kmem_cache *s)
@@ -1036,21 +690,11 @@ void kmem_cache_shrink_all(struct kmem_cache *s)
 	kasan_cache_shrink(s);
 	__kmem_cache_shrink(s);
 
-	/*
-	 * We have to take the slab_mutex to protect from the memcg list
-	 * modification.
-	 */
-	mutex_lock(&slab_mutex);
-	for_each_memcg_cache(c, s) {
-		/*
-		 * Don't need to shrink deactivated memcg caches.
-		 */
-		if (s->flags & SLAB_DEACTIVATED)
-			continue;
+	c = memcg_cache(s);
+	if (c) {
 		kasan_cache_shrink(c);
 		__kmem_cache_shrink(c);
 	}
-	mutex_unlock(&slab_mutex);
 	put_online_mems();
 	put_online_cpus();
 }
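
This hunk, like the mm/slub.c hunks later in the diff, swaps the for_each_memcg_cache() walk (which needed the slab_mutex-protected children list) for a single memcg_cache() lookup. The helper itself is assumed here to live in mm/slab.h and is not part of this excerpt; a minimal sketch of its expected shape:

/* Assumed shape of the memcg_cache() helper used above: return the one
 * memcg cache a root cache may carry, or NULL if it has not been created
 * yet.  Sketch only; the real definition is outside this excerpt. */
static inline struct kmem_cache *memcg_cache(struct kmem_cache *s)
{
	return s->memcg_params.memcg_cache;
}

Since there is at most one such cache per root cache, callers can test the pointer directly instead of iterating, which is why the slab_mutex lock/unlock pair around the old loop disappears here.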
@@ -1105,7 +749,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name,
 
 	create_boot_cache(s, name, size, flags, useroffset, usersize);
 	list_add(&s->list, &slab_caches);
-	memcg_link_cache(s, NULL);
+	memcg_link_cache(s);
 	s->refcount = 1;
 	return s;
 }
@@ -1483,7 +1127,8 @@ memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
 	if (!is_root_cache(s))
 		return;
 
-	for_each_memcg_cache(c, s) {
+	c = memcg_cache(s);
+	if (c) {
 		memset(&sinfo, 0, sizeof(sinfo));
 		get_slabinfo(c, &sinfo);
 
@@ -1614,7 +1259,7 @@ module_init(slab_proc_init);
 
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MEMCG_KMEM)
 /*
- * Display information about kmem caches that have child memcg caches.
+ * Display information about kmem caches that have memcg cache.
  */
 static int memcg_slabinfo_show(struct seq_file *m, void *unused)
 {
@@ -1626,9 +1271,9 @@ static int memcg_slabinfo_show(struct seq_file *m, void *unused)
 	seq_puts(m, " <active_slabs> <num_slabs>\n");
 	list_for_each_entry(s, &slab_root_caches, root_caches_node) {
 		/*
-		 * Skip kmem caches that don't have any memcg children.
+		 * Skip kmem caches that don't have the memcg cache.
 		 */
-		if (list_empty(&s->memcg_params.children))
+		if (!s->memcg_params.memcg_cache)
 			continue;
 
 		memset(&sinfo, 0, sizeof(sinfo));
@@ -1637,24 +1282,14 @@ static int memcg_slabinfo_show(struct seq_file *m, void *unused)
 			   cache_name(s), sinfo.active_objs, sinfo.num_objs,
 			   sinfo.active_slabs, sinfo.num_slabs);
 
-		for_each_memcg_cache(c, s) {
-			struct cgroup_subsys_state *css;
-			char *status = "";
-
-			css = &c->memcg_params.memcg->css;
-			if (!(css->flags & CSS_ONLINE))
-				status = ":dead";
-			else if (c->flags & SLAB_DEACTIVATED)
-				status = ":deact";
-
-			memset(&sinfo, 0, sizeof(sinfo));
-			get_slabinfo(c, &sinfo);
-			seq_printf(m, "%-17s %4d%-6s %6lu %6lu %6lu %6lu\n",
-				   cache_name(c), css->id, status,
-				   sinfo.active_objs, sinfo.num_objs,
-				   sinfo.active_slabs, sinfo.num_slabs);
-		}
+		c = s->memcg_params.memcg_cache;
+		memset(&sinfo, 0, sizeof(sinfo));
+		get_slabinfo(c, &sinfo);
+		seq_printf(m, "%-17s %4d %6lu %6lu %6lu %6lu\n",
+			   cache_name(c), root_mem_cgroup->css.id,
+			   sinfo.active_objs, sinfo.num_objs,
+			   sinfo.active_slabs, sinfo.num_slabs);
 	}
 	mutex_unlock(&slab_mutex);
 	return 0;
 }
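
With one memcg cache per root cache, memcg_slabinfo prints at most one extra line per root cache, tagged with the root_mem_cgroup css id and without the old :dead/:deact suffixes. A standalone snippet showing how the new format string lays out such a line (the cache name is the "-memcg" naming from this patch; the css id and counts are made-up example values):

#include <stdio.h>

/* Illustration only: field layout of the new seq_printf() format in
 * memcg_slabinfo_show().  "1" stands in for root_mem_cgroup->css.id; the
 * object and slab counts are invented. */
int main(void)
{
	printf("%-17s %4d %6lu %6lu %6lu %6lu\n",
	       "dentry-memcg", 1, 1320UL, 1536UL, 64UL, 64UL);
	return 0;
}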

mm/slub.c (38 changed lines)

@@ -4204,36 +4204,6 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 	return ret;
 }
 
-#ifdef CONFIG_MEMCG
-void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
-{
-	/*
-	 * Called with all the locks held after a sched RCU grace period.
-	 * Even if @s becomes empty after shrinking, we can't know that @s
-	 * doesn't have allocations already in-flight and thus can't
-	 * destroy @s until the associated memcg is released.
-	 *
-	 * However, let's remove the sysfs files for empty caches here.
-	 * Each cache has a lot of interface files which aren't
-	 * particularly useful for empty draining caches; otherwise, we can
-	 * easily end up with millions of unnecessary sysfs files on
-	 * systems which have a lot of memory and transient cgroups.
-	 */
-	if (!__kmem_cache_shrink(s))
-		sysfs_slab_remove(s);
-}
-
-void __kmemcg_cache_deactivate(struct kmem_cache *s)
-{
-	/*
-	 * Disable empty slabs caching. Used to avoid pinning offline
-	 * memory cgroups by kmem pages that can be freed.
-	 */
-	slub_set_cpu_partial(s, 0);
-	s->min_partial = 0;
-}
-#endif	/* CONFIG_MEMCG */
-
 static int slab_mem_going_offline_callback(void *arg)
 {
 	struct kmem_cache *s;
@@ -4390,7 +4360,7 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
 	}
 	slab_init_memcg_params(s);
 	list_add(&s->list, &slab_caches);
-	memcg_link_cache(s, NULL);
+	memcg_link_cache(s);
 	return s;
 }
 
@@ -4458,7 +4428,8 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
 		s->object_size = max(s->object_size, size);
 		s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
 
-		for_each_memcg_cache(c, s) {
+		c = memcg_cache(s);
+		if (c) {
 			c->object_size = s->object_size;
 			c->inuse = max(c->inuse, ALIGN(size, sizeof(void *)));
 		}
@@ -5591,7 +5562,8 @@ static ssize_t slab_attr_store(struct kobject *kobj,
 		 * directly either failed or succeeded, in which case we loop
 		 * through the descendants with best-effort propagation.
 		 */
-		for_each_memcg_cache(c, s)
+		c = memcg_cache(s);
+		if (c)
 			attribute->store(c, buf, len);
 		mutex_unlock(&slab_mutex);
 	}