slab: introduce alien_cache
Currently we use array_cache for the alien cache. Although the two uses are mostly similar, there is one difference: the alien cache needs a spinlock. array_cache itself does not need a spinlock, but to reuse it for the alien cache the array_cache structure has to carry one. That is needless overhead for every other array_cache user, so it is better removed. This patch prepares for that by introducing a dedicated alien_cache structure and using it. The following patch then removes the spinlock from array_cache.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1fe00d50a9
commit c8522a3a58

2 changed files with 67 additions and 41 deletions
 mm/slab.c | 106 +++++++++++++++++++++++++++++++++++---------------------
 mm/slab.h |   2 +-
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -203,6 +203,11 @@ struct array_cache {
 	 */
 };
 
+struct alien_cache {
+	spinlock_t lock;
+	struct array_cache ac;
+};
+
 #define SLAB_OBJ_PFMEMALLOC	1
 static inline bool is_obj_pfmemalloc(void *objp)
 {
@@ -491,7 +496,7 @@ static void slab_set_lock_classes(struct kmem_cache *cachep,
 		struct lock_class_key *l3_key, struct lock_class_key *alc_key,
 		struct kmem_cache_node *n)
 {
-	struct array_cache **alc;
+	struct alien_cache **alc;
 	int r;
 
 	lockdep_set_class(&n->list_lock, l3_key);
@@ -507,7 +512,7 @@ static void slab_set_lock_classes(struct kmem_cache *cachep,
 		return;
 	for_each_node(r) {
 		if (alc[r])
-			lockdep_set_class(&alc[r]->lock, alc_key);
+			lockdep_set_class(&(alc[r]->ac.lock), alc_key);
 	}
 }
 
@@ -965,12 +970,13 @@ static int transfer_objects(struct array_cache *to,
 #define drain_alien_cache(cachep, alien) do { } while (0)
 #define reap_alien(cachep, n) do { } while (0)
 
-static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
+static inline struct alien_cache **alloc_alien_cache(int node,
+						int limit, gfp_t gfp)
 {
-	return (struct array_cache **)BAD_ALIEN_MAGIC;
+	return (struct alien_cache **)BAD_ALIEN_MAGIC;
 }
 
-static inline void free_alien_cache(struct array_cache **ac_ptr)
+static inline void free_alien_cache(struct alien_cache **ac_ptr)
 {
 }
 
@@ -996,40 +1002,52 @@ static inline void *____cache_alloc_node(struct kmem_cache *cachep,
 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
-static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
+static struct alien_cache *__alloc_alien_cache(int node, int entries,
+						int batch, gfp_t gfp)
 {
-	struct array_cache **ac_ptr;
+	int memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
+	struct alien_cache *alc = NULL;
+
+	alc = kmalloc_node(memsize, gfp, node);
+	init_arraycache(&alc->ac, entries, batch);
+	return alc;
+}
+
+static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
+{
+	struct alien_cache **alc_ptr;
 	int memsize = sizeof(void *) * nr_node_ids;
 	int i;
 
 	if (limit > 1)
 		limit = 12;
-	ac_ptr = kzalloc_node(memsize, gfp, node);
-	if (ac_ptr) {
-		for_each_node(i) {
-			if (i == node || !node_online(i))
-				continue;
-			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
-			if (!ac_ptr[i]) {
-				for (i--; i >= 0; i--)
-					kfree(ac_ptr[i]);
-				kfree(ac_ptr);
-				return NULL;
-			}
+	alc_ptr = kzalloc_node(memsize, gfp, node);
+	if (!alc_ptr)
+		return NULL;
+
+	for_each_node(i) {
+		if (i == node || !node_online(i))
+			continue;
+		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
+		if (!alc_ptr[i]) {
+			for (i--; i >= 0; i--)
+				kfree(alc_ptr[i]);
+			kfree(alc_ptr);
+			return NULL;
 		}
 	}
-	return ac_ptr;
+	return alc_ptr;
 }
 
-static void free_alien_cache(struct array_cache **ac_ptr)
+static void free_alien_cache(struct alien_cache **alc_ptr)
 {
 	int i;
 
-	if (!ac_ptr)
+	if (!alc_ptr)
 		return;
 	for_each_node(i)
-	    kfree(ac_ptr[i]);
-	kfree(ac_ptr);
+	    kfree(alc_ptr[i]);
+	kfree(alc_ptr);
 }
 
 static void __drain_alien_cache(struct kmem_cache *cachep,
@@ -1063,25 +1081,31 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
 	int node = __this_cpu_read(slab_reap_node);
 
 	if (n->alien) {
-		struct array_cache *ac = n->alien[node];
+		struct alien_cache *alc = n->alien[node];
+		struct array_cache *ac;
 
-		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
-			__drain_alien_cache(cachep, ac, node);
-			spin_unlock_irq(&ac->lock);
+		if (alc) {
+			ac = &alc->ac;
+			if (ac->avail && spin_trylock_irq(&ac->lock)) {
+				__drain_alien_cache(cachep, ac, node);
+				spin_unlock_irq(&ac->lock);
+			}
 		}
 	}
 }
 
 static void drain_alien_cache(struct kmem_cache *cachep,
-				struct array_cache **alien)
+				struct alien_cache **alien)
 {
 	int i = 0;
+	struct alien_cache *alc;
 	struct array_cache *ac;
 	unsigned long flags;
 
 	for_each_online_node(i) {
-		ac = alien[i];
-		if (ac) {
+		alc = alien[i];
+		if (alc) {
+			ac = &alc->ac;
 			spin_lock_irqsave(&ac->lock, flags);
 			__drain_alien_cache(cachep, ac, i);
 			spin_unlock_irqrestore(&ac->lock, flags);
@@ -1093,7 +1117,8 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 {
 	int nodeid = page_to_nid(virt_to_page(objp));
 	struct kmem_cache_node *n;
-	struct array_cache *alien = NULL;
+	struct alien_cache *alien = NULL;
+	struct array_cache *ac;
 	int node;
 	LIST_HEAD(list);
 
@@ -1110,13 +1135,14 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 	STATS_INC_NODEFREES(cachep);
 	if (n->alien && n->alien[nodeid]) {
 		alien = n->alien[nodeid];
-		spin_lock(&alien->lock);
-		if (unlikely(alien->avail == alien->limit)) {
+		ac = &alien->ac;
+		spin_lock(&ac->lock);
+		if (unlikely(ac->avail == ac->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
-			__drain_alien_cache(cachep, alien, nodeid);
+			__drain_alien_cache(cachep, ac, nodeid);
 		}
-		ac_put_obj(cachep, alien, objp);
-		spin_unlock(&alien->lock);
+		ac_put_obj(cachep, ac, objp);
+		spin_unlock(&ac->lock);
 	} else {
 		n = get_node(cachep, nodeid);
 		spin_lock(&n->list_lock);
@@ -1191,7 +1217,7 @@ static void cpuup_canceled(long cpu)
 	list_for_each_entry(cachep, &slab_caches, list) {
 		struct array_cache *nc;
 		struct array_cache *shared;
-		struct array_cache **alien;
+		struct alien_cache **alien;
 		LIST_HEAD(list);
 
 		/* cpu is dead; no one can alloc from it. */
@@ -1272,7 +1298,7 @@ static int cpuup_prepare(long cpu)
 	list_for_each_entry(cachep, &slab_caches, list) {
 		struct array_cache *nc;
 		struct array_cache *shared = NULL;
-		struct array_cache **alien = NULL;
+		struct alien_cache **alien = NULL;
 
 		nc = alloc_arraycache(node, cachep->limit,
 				cachep->batchcount, GFP_KERNEL);
@@ -3762,7 +3788,7 @@ static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
 	int node;
 	struct kmem_cache_node *n;
 	struct array_cache *new_shared;
-	struct array_cache **new_alien = NULL;
+	struct alien_cache **new_alien = NULL;
 
 	for_each_online_node(node) {
 
diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -276,7 +276,7 @@ struct kmem_cache_node {
 	unsigned int free_limit;
 	unsigned int colour_next;	/* Per-node cache coloring */
 	struct array_cache *shared;	/* shared per node */
-	struct array_cache **alien;	/* on other nodes */
+	struct alien_cache **alien;	/* on other nodes */
 	unsigned long next_reap;	/* updated without locking */
 	int free_touched;		/* updated without locking */
 #endif
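For readers outside the kernel tree, the split this patch establishes can be illustrated with a small standalone sketch: the lock is hoisted out of array_cache into the alien_cache wrapper, so only the cross-node free path pays for locking once the follow-up patch lands. This is a userspace model, not kernel code: pthread_mutex_t stands in for spinlock_t, the fixed-size entry[] array replaces the kernel's flexible array member, and alien_cache_put() is a hypothetical condensation of the cache_free_alien() path in the diff above.

#include <stdio.h>
#include <pthread.h>

/* After the follow-up patch, array_cache itself carries no lock. */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	void *entry[8];			/* flexible array member in the kernel */
};

/* The wrapper adds a lock only for the alien (remote-node) use. */
struct alien_cache {
	pthread_mutex_t lock;		/* stands in for spinlock_t */
	struct array_cache ac;
};

/*
 * Condensed model of the cache_free_alien() pattern: lock the wrapper,
 * then operate on the embedded array_cache, "draining" it when full.
 */
static void alien_cache_put(struct alien_cache *alc, void *objp)
{
	struct array_cache *ac = &alc->ac;

	pthread_mutex_lock(&alc->lock);
	if (ac->avail == ac->limit)
		ac->avail = 0;		/* the kernel calls __drain_alien_cache() */
	ac->entry[ac->avail++] = objp;
	pthread_mutex_unlock(&alc->lock);
}

int main(void)
{
	static struct alien_cache alc = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.ac = { .avail = 0, .limit = 8 },
	};
	int obj;

	alien_cache_put(&alc, &obj);
	printf("avail after one put: %u\n", alc.ac.avail);
	return 0;
}

Per-CPU users of a bare array_cache never touch the mutex; only alien frees take it, which is exactly the overhead split the commit message describes.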