	mm/sl[aou]b: Extract a common function for kmem_cache_destroy
kmem_cache_destroy does basically the same in all allocators. Extract
common code which is easy since we already have common mutex handling.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
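The split this produces: each allocator keeps two small hooks, __kmem_cache_shutdown() (drain the cache and report whether objects remain) and __kmem_cache_destroy() (release allocator-private state), declared in mm/slab.h, while the refcounting, cache-list manipulation, slab_mutex/CPU-hotplug locking, and RCU barrier move into one generic kmem_cache_destroy() in mm/slab_common.c. In outline (condensed from the hunks below; the error printk is elided):

	void kmem_cache_destroy(struct kmem_cache *s)
	{
		get_online_cpus();
		mutex_lock(&slab_mutex);
		s->refcount--;
		if (!s->refcount) {
			list_del(&s->list);
			if (!__kmem_cache_shutdown(s)) {	/* allocator hook: drain */
				if (s->flags & SLAB_DESTROY_BY_RCU)
					rcu_barrier();
				__kmem_cache_destroy(s);	/* allocator hook: release */
			} else {
				/* objects remain: put the cache back and warn */
				list_add(&s->list, &slab_caches);
				dump_stack();
			}
		}
		mutex_unlock(&slab_mutex);
		put_online_cpus();
	}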
parent 7c9adf5a54
commit 945cf2b619

5 changed files with 49 additions and 75 deletions

mm/slab.c | 45
@@ -2206,7 +2206,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 	}
 }
 
-static void __kmem_cache_destroy(struct kmem_cache *cachep)
+void __kmem_cache_destroy(struct kmem_cache *cachep)
 {
 	int i;
 	struct kmem_list3 *l3;
@@ -2763,50 +2763,11 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
-/**
- * kmem_cache_destroy - delete a cache
- * @cachep: the cache to destroy
- *
- * Remove a &struct kmem_cache object from the slab cache.
- *
- * It is expected this function will be called by a module when it is
- * unloaded.  This will remove the cache completely, and avoid a duplicate
- * cache being allocated each time a module is loaded and unloaded, if the
- * module doesn't have persistent in-kernel storage across loads and unloads.
- *
- * The cache must be empty before calling this function.
- *
- * The caller must guarantee that no one will allocate memory from the cache
- * during the kmem_cache_destroy().
- */
-void kmem_cache_destroy(struct kmem_cache *cachep)
+int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-	BUG_ON(!cachep || in_interrupt());
-
-	/* Find the cache in the chain of caches. */
-	get_online_cpus();
-	mutex_lock(&slab_mutex);
-	/*
-	 * the chain is never empty, cache_cache is never destroyed
-	 */
-	list_del(&cachep->list);
-	if (__cache_shrink(cachep)) {
-		slab_error(cachep, "Can't free all objects");
-		list_add(&cachep->list, &slab_caches);
-		mutex_unlock(&slab_mutex);
-		put_online_cpus();
-		return;
-	}
-
-	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
-		rcu_barrier();
-
-	__kmem_cache_destroy(cachep);
-	mutex_unlock(&slab_mutex);
-	put_online_cpus();
+	return __cache_shrink(cachep);
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 /*
  * Get the memory for a slab management obj.
  * For a slab cache when the slab descriptor is off-slab, slab descriptors
mm/slab.h

@@ -30,4 +30,7 @@ extern struct list_head slab_caches;
 struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	size_t align, unsigned long flags, void (*ctor)(void *));
 
+int __kmem_cache_shutdown(struct kmem_cache *);
+void __kmem_cache_destroy(struct kmem_cache *);
+
 #endif
mm/slab_common.c

@@ -140,6 +140,31 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
+void kmem_cache_destroy(struct kmem_cache *s)
+{
+	get_online_cpus();
+	mutex_lock(&slab_mutex);
+	s->refcount--;
+	if (!s->refcount) {
+		list_del(&s->list);
+
+		if (!__kmem_cache_shutdown(s)) {
+			if (s->flags & SLAB_DESTROY_BY_RCU)
+				rcu_barrier();
+
+			__kmem_cache_destroy(s);
+		} else {
+			list_add(&s->list, &slab_caches);
+			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
+				s->name);
+			dump_stack();
+		}
+	}
+	mutex_unlock(&slab_mutex);
+	put_online_cpus();
+}
+EXPORT_SYMBOL(kmem_cache_destroy);
+
 int slab_is_available(void)
 {
 	return slab_state >= UP;

mm/slob.c | 15
@@ -538,18 +538,11 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	return c;
 }
 
-void kmem_cache_destroy(struct kmem_cache *c)
+void __kmem_cache_destroy(struct kmem_cache *c)
 {
-	mutex_lock(&slab_mutex);
-	list_del(&c->list);
-	mutex_unlock(&slab_mutex);
-
 	kmemleak_free(c);
 	if (c->flags & SLAB_DESTROY_BY_RCU)
 		rcu_barrier();
 	slob_free(c, sizeof(struct kmem_cache));
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
@@ -617,6 +610,12 @@ unsigned int kmem_cache_size(struct kmem_cache *c)
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
+int __kmem_cache_shutdown(struct kmem_cache *c)
+{
+	/* No way to check for remaining objects */
+	return 0;
+}
+
 int kmem_cache_shrink(struct kmem_cache *d)
 {
 	return 0;

mm/slub.c | 32
@@ -624,7 +624,7 @@ static void object_err(struct kmem_cache *s, struct page *page,
 	print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
 {
 	va_list args;
 	char buf[100];
@@ -3146,7 +3146,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 				     sizeof(long), GFP_ATOMIC);
 	if (!map)
 		return;
-	slab_err(s, page, "%s", text);
+	slab_err(s, page, text, s->name);
 	slab_lock(page);
 
 	get_map(s, page, map);
@@ -3178,7 +3178,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
-				"Objects remaining on kmem_cache_close()");
+			"Objects remaining in %s on kmem_cache_close()");
 		}
 	}
 }
@@ -3191,7 +3191,6 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 	int node;
 
 	flush_all(s);
-	free_percpu(s->cpu_slab);
 	/* Attempt to free all objects */
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
@@ -3200,33 +3199,20 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 		if (n->nr_partial || slabs_node(s, node))
 			return 1;
 	}
+	free_percpu(s->cpu_slab);
 	free_kmem_cache_nodes(s);
 	return 0;
 }
 
-/*
- * Close a cache and release the kmem_cache structure
- * (must be used for caches created using kmem_cache_create)
- */
-void kmem_cache_destroy(struct kmem_cache *s)
+int __kmem_cache_shutdown(struct kmem_cache *s)
 {
-	mutex_lock(&slab_mutex);
-	s->refcount--;
-	if (!s->refcount) {
-		list_del(&s->list);
-		mutex_unlock(&slab_mutex);
-		if (kmem_cache_close(s)) {
-			printk(KERN_ERR "SLUB %s: %s called for cache that "
-				"still has objects.\n", s->name, __func__);
-			dump_stack();
-		}
-		if (s->flags & SLAB_DESTROY_BY_RCU)
-			rcu_barrier();
-		sysfs_slab_remove(s);
-	} else
-		mutex_unlock(&slab_mutex);
+	return kmem_cache_close(s);
+}
+
+void __kmem_cache_destroy(struct kmem_cache *s)
+{
+	sysfs_slab_remove(s);
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 /********************************************************************
  *		Kmalloc subsystem