	mm: memcg/slab: save obj_cgroup for non-root slab objects
Store the obj_cgroup pointer in the corresponding place of page->obj_cgroups for each allocated non-root slab object, and make sure that each allocated object holds a reference to its obj_cgroup.

The objcg pointer is obtained by dereferencing memcg->objcg in memcg_kmem_get_cache() and is passed from slab_pre_alloc_hook() to slab_post_alloc_hook(). On successful allocation(s) it is stored in the page->obj_cgroups vector.

The objcg-obtaining part looks a bit bulky now, but it will be simplified by the next commits in the series.

Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20200623174037.3951353-9-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 964d4bd370
parent 286e04b8ed

5 changed files with 88 additions and 21 deletions
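To make the flow concrete before the per-file diffs: the pre-alloc hook obtains the objcg and takes one reference for the whole (possibly bulk) allocation, and the post-alloc hook stores one extra-referenced objcg pointer per successfully allocated object into the page's obj_cgroups vector, then drops the batch reference. Below is a minimal userspace sketch of that scheme; struct slab_page, the fixed 64-byte object size, and the plain integer refcount are simplified stand-ins for the kernel's types, not the real API.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct obj_cgroup {
	int refcount;			/* kernel: a real refcount, not an int */
};

#define OBJ_SIZE 64
#define OBJS_PER_PAGE 8

struct slab_page {
	char objects[OBJS_PER_PAGE][OBJ_SIZE];
	struct obj_cgroup *obj_cgroups[OBJS_PER_PAGE];	/* one slot per object */
};

/* slot of an object within its page, like obj_to_index() in mm/slab.h */
static size_t obj_to_index(struct slab_page *page, void *obj)
{
	return (size_t)((char *)obj - &page->objects[0][0]) / OBJ_SIZE;
}

/* pre-alloc: grab the objcg once per allocation batch (obj_cgroup_tryget) */
static struct obj_cgroup *pre_alloc_hook(struct obj_cgroup *memcg_objcg)
{
	memcg_objcg->refcount++;
	return memcg_objcg;
}

/* post-alloc: store one referenced pointer per object, drop the batch ref */
static void post_alloc_hook(struct slab_page *page, struct obj_cgroup *objcg,
			    size_t size, void **p)
{
	for (size_t i = 0; i < size; i++) {
		if (p[i]) {
			objcg->refcount++;	/* obj_cgroup_get() */
			page->obj_cgroups[obj_to_index(page, p[i])] = objcg;
		}
	}
	objcg->refcount--;			/* obj_cgroup_put() */
}

int main(void)
{
	struct obj_cgroup cg = { 0 };
	struct slab_page page = { 0 };
	void *objs[2] = { page.objects[3], page.objects[5] };

	post_alloc_hook(&page, pre_alloc_hook(&cg), 2, objs);
	assert(page.obj_cgroups[3] == &cg && page.obj_cgroups[5] == &cg);
	printf("objcg refcount: %d\n", cg.refcount);	/* 2: one per object */
	return 0;
}

The batch count (1 for single allocations, size for kmem_cache_alloc_bulk()) is why the hooks grow a size parameter in the diffs below.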
include/linux/memcontrol.h (3 changes)

@@ -1404,7 +1404,8 @@ static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
 }
 #endif
 
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
+struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep,
+					struct obj_cgroup **objcgp);
 void memcg_kmem_put_cache(struct kmem_cache *cachep);
 
 #ifdef CONFIG_MEMCG_KMEM
mm/memcontrol.c (14 changes)

@@ -2973,7 +2973,8 @@ static inline bool memcg_kmem_bypass(void)
  * done with it, memcg_kmem_put_cache() must be called to release the
  * reference.
  */
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
+struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep,
+					struct obj_cgroup **objcgp)
 {
 	struct mem_cgroup *memcg;
 	struct kmem_cache *memcg_cachep;
@@ -3029,8 +3030,17 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
 	 */
 	if (unlikely(!memcg_cachep))
 		memcg_schedule_kmem_cache_create(memcg, cachep);
-	else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt))
+	else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt)) {
+		struct obj_cgroup *objcg = rcu_dereference(memcg->objcg);
+
+		if (!objcg || !obj_cgroup_tryget(objcg)) {
+			percpu_ref_put(&memcg_cachep->memcg_params.refcnt);
+			goto out_unlock;
+		}
+
+		*objcgp = objcg;
 		cachep = memcg_cachep;
+	}
 out_unlock:
 	rcu_read_unlock();
 	return cachep;
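The subtle part of this hunk is the tryget: memcg->objcg is read under rcu_read_lock(), so the pointer may reference an obj_cgroup whose last reference is concurrently being dropped, and it may only be used after obj_cgroup_tryget() succeeds; on failure the percpu cache reference is dropped and the allocation falls back to the root cache via out_unlock. A minimal sketch of such a conditional increment, using C11 atomics as a stand-in for the kernel's refcount machinery:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct obj_cgroup {
	atomic_int refcnt;	/* 0 means teardown already won */
};

/* take a reference only if the object is still live (CAS loop) */
static bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	int old = atomic_load(&objcg->refcnt);

	while (old > 0)
		if (atomic_compare_exchange_weak(&objcg->refcnt, &old, old + 1))
			return true;	/* got a reference */
	return false;			/* already dead: caller must bail */
}

int main(void)
{
	struct obj_cgroup live = { 1 }, dead = { 0 };

	assert(obj_cgroup_tryget(&live));	/* refcnt 1 -> 2 */
	assert(!obj_cgroup_tryget(&dead));	/* stays 0 */
	return 0;
}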
mm/slab.c (18 changes)

@@ -3228,9 +3228,10 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 	int slab_node = numa_mem_id();
+	struct obj_cgroup *objcg = NULL;
 
 	flags &= gfp_allowed_mask;
-	cachep = slab_pre_alloc_hook(cachep, flags);
+	cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
 	if (unlikely(!cachep))
 		return NULL;
 
@@ -3266,7 +3267,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
 		memset(ptr, 0, cachep->object_size);
 
-	slab_post_alloc_hook(cachep, flags, 1, &ptr);
+	slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr);
 	return ptr;
 }
 
@@ -3307,9 +3308,10 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
 {
 	unsigned long save_flags;
 	void *objp;
+	struct obj_cgroup *objcg = NULL;
 
 	flags &= gfp_allowed_mask;
-	cachep = slab_pre_alloc_hook(cachep, flags);
+	cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
 	if (unlikely(!cachep))
 		return NULL;
 
@@ -3323,7 +3325,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
 	if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
 		memset(objp, 0, cachep->object_size);
 
-	slab_post_alloc_hook(cachep, flags, 1, &objp);
+	slab_post_alloc_hook(cachep, objcg, flags, 1, &objp);
 	return objp;
 }
 
@@ -3450,6 +3452,7 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
 		memset(objp, 0, cachep->object_size);
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, caller);
+	memcg_slab_free_hook(cachep, virt_to_head_page(objp), objp);
 
 	/*
 	 * Skip calling cache_free_alien() when the platform is not numa.
@@ -3515,8 +3518,9 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			  void **p)
 {
 	size_t i;
+	struct obj_cgroup *objcg = NULL;
 
-	s = slab_pre_alloc_hook(s, flags);
+	s = slab_pre_alloc_hook(s, &objcg, size, flags);
 	if (!s)
 		return 0;
 
@@ -3539,13 +3543,13 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 		for (i = 0; i < size; i++)
 			memset(p[i], 0, s->object_size);
 
-	slab_post_alloc_hook(s, flags, size, p);
+	slab_post_alloc_hook(s, objcg, flags, size, p);
 	/* FIXME: Trace call missing. Christoph would like a bulk variant */
 	return size;
 error:
 	local_irq_enable();
 	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
-	slab_post_alloc_hook(s, flags, i, p);
+	slab_post_alloc_hook(s, objcg, flags, i, p);
 	__kmem_cache_free_bulk(s, i, p);
 	return 0;
 }
mm/slab.h (60 changes)

@@ -470,6 +470,41 @@ static inline void memcg_free_page_obj_cgroups(struct page *page)
 	page->obj_cgroups = NULL;
 }
 
+static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
+					      struct obj_cgroup *objcg,
+					      size_t size, void **p)
+{
+	struct page *page;
+	unsigned long off;
+	size_t i;
+
+	for (i = 0; i < size; i++) {
+		if (likely(p[i])) {
+			page = virt_to_head_page(p[i]);
+			off = obj_to_index(s, page, p[i]);
+			obj_cgroup_get(objcg);
+			page_obj_cgroups(page)[off] = objcg;
+		}
+	}
+	obj_cgroup_put(objcg);
+	memcg_kmem_put_cache(s);
+}
+
+static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
+					void *p)
+{
+	struct obj_cgroup *objcg;
+	unsigned int off;
+
+	if (!memcg_kmem_enabled() || is_root_cache(s))
+		return;
+
+	off = obj_to_index(s, page, p);
+	objcg = page_obj_cgroups(page)[off];
+	page_obj_cgroups(page)[off] = NULL;
+	obj_cgroup_put(objcg);
+}
+
 extern void slab_init_memcg_params(struct kmem_cache *);
 extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);
 
@@ -529,6 +564,17 @@ static inline void memcg_free_page_obj_cgroups(struct page *page)
 {
 }
 
+static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
+					      struct obj_cgroup *objcg,
+					      size_t size, void **p)
+{
+}
+
+static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
+					void *p)
+{
+}
+
 static inline void slab_init_memcg_params(struct kmem_cache *s)
 {
 }
@@ -631,7 +677,8 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
 }
 
 static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
-						     gfp_t flags)
+						     struct obj_cgroup **objcgp,
+						     size_t size, gfp_t flags)
 {
 	flags &= gfp_allowed_mask;
 
@@ -645,13 +692,14 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
 
 	if (memcg_kmem_enabled() &&
 	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
-		return memcg_kmem_get_cache(s);
+		return memcg_kmem_get_cache(s, objcgp);
 
 	return s;
 }
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
-					size_t size, void **p)
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+					struct obj_cgroup *objcg,
+					gfp_t flags, size_t size, void **p)
 {
 	size_t i;
 
@@ -663,8 +711,8 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
 					 s->flags, flags);
 	}
 
-	if (memcg_kmem_enabled())
-		memcg_kmem_put_cache(s);
+	if (memcg_kmem_enabled() && !is_root_cache(s))
+		memcg_slab_post_alloc_hook(s, objcg, size, p);
 }
 
 #ifndef CONFIG_SLOB
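The free side, visible in memcg_slab_free_hook() above, is symmetric: compute the object's slot with obj_to_index(), clear it, and drop the per-object reference. Continuing the userspace sketch from earlier (same hypothetical struct slab_page and integer refcount; illustration only, not the kernel code):

/* free-path counterpart to post_alloc_hook() in the sketch above */
static void free_hook(struct slab_page *page, void *obj)
{
	size_t off = obj_to_index(page, obj);
	struct obj_cgroup *objcg = page->obj_cgroups[off];

	if (!objcg)			/* e.g. a root-cache object */
		return;
	page->obj_cgroups[off] = NULL;	/* clear the slot for reuse */
	objcg->refcount--;		/* obj_cgroup_put() */
}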
mm/slub.c (14 changes)

@@ -2817,8 +2817,9 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	struct kmem_cache_cpu *c;
 	struct page *page;
 	unsigned long tid;
+	struct obj_cgroup *objcg = NULL;
 
-	s = slab_pre_alloc_hook(s, gfpflags);
+	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
 	if (!s)
 		return NULL;
 redo:
@@ -2894,7 +2895,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
 		memset(object, 0, s->object_size);
 
-	slab_post_alloc_hook(s, gfpflags, 1, &object);
+	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object);
 
 	return object;
 }
@@ -3099,6 +3100,8 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
 	void *tail_obj = tail ? : head;
 	struct kmem_cache_cpu *c;
 	unsigned long tid;
+
+	memcg_slab_free_hook(s, page, head);
 redo:
 	/*
 	 * Determine the currently cpus per cpu slab.
@@ -3278,9 +3281,10 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 {
 	struct kmem_cache_cpu *c;
 	int i;
+	struct obj_cgroup *objcg = NULL;
 
 	/* memcg and kmem_cache debug support */
-	s = slab_pre_alloc_hook(s, flags);
+	s = slab_pre_alloc_hook(s, &objcg, size, flags);
 	if (unlikely(!s))
 		return false;
 	/*
@@ -3334,11 +3338,11 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	}
 
 	/* memcg and kmem_cache debug support */
-	slab_post_alloc_hook(s, flags, size, p);
+	slab_post_alloc_hook(s, objcg, flags, size, p);
 	return i;
 error:
 	local_irq_enable();
-	slab_post_alloc_hook(s, flags, i, p);
+	slab_post_alloc_hook(s, objcg, flags, i, p);
 	__kmem_cache_free_bulk(s, i, p);
 	return 0;
 }