forked from mirrors/linux
		
slab: use the lock on alien_cache, instead of the lock on array_cache

Now that we have a separate alien_cache structure, it is better to hold
the lock on alien_cache while manipulating alien_cache. After that, we
no longer need the lock on array_cache, so remove it.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:

parent c8522a3a58
commit 49dfc304ba
1 changed file with 8 additions and 17 deletions

mm/slab.c | 25 ++++++++-----------------
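For review context, here is a minimal sketch of the two structures involved as they stand after the parent commit c8522a3a58; field order and the comments are assumptions, and the authoritative definitions live in mm/slab.c:

/* Sketch only: assumed layout, see mm/slab.c for the real definitions. */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	/* spinlock_t lock;  <- removed by this patch */
	void *entry[];		/* cached objects */
};

struct alien_cache {
	spinlock_t lock;	/* after this patch, the only lock that
				 * guards the embedded array_cache */
	struct array_cache ac;
};

With this layout, callers take alc->lock and then operate on &alc->ac, which is exactly the substitution the hunks below perform.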
@@ -191,7 +191,6 @@ struct array_cache {
 	unsigned int limit;
 	unsigned int batchcount;
 	unsigned int touched;
-	spinlock_t lock;
 	void *entry[];	/*
 			 * Must have this definition in here for the proper
 			 * alignment of array_cache. Also simplifies accessing
@@ -512,7 +511,7 @@ static void slab_set_lock_classes(struct kmem_cache *cachep,
 		return;
 	for_each_node(r) {
 		if (alc[r])
-			lockdep_set_class(&(alc[r]->ac.lock), alc_key);
+			lockdep_set_class(&(alc[r]->lock), alc_key);
 	}
 }
 
@@ -811,7 +810,6 @@ static void init_arraycache(struct array_cache *ac, int limit, int batch)
 		ac->limit = limit;
 		ac->batchcount = batch;
 		ac->touched = 0;
-		spin_lock_init(&ac->lock);
 	}
 }
 
@@ -1010,6 +1008,7 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries,
 
 	alc = kmalloc_node(memsize, gfp, node);
 	init_arraycache(&alc->ac, entries, batch);
+	spin_lock_init(&alc->lock);
 	return alc;
 }
 
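After this hunk, allocation and lock setup sit side by side. A sketch of the resulting function; the memsize computation and the surrounding declarations are assumptions filled in from context:

static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
	/* assumed: room for the header plus the entry[] pointer array */
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	init_arraycache(&alc->ac, entries, batch);	/* no longer inits a lock */
	spin_lock_init(&alc->lock);	/* the lock now lives in alien_cache */
	return alc;
}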
@@ -1086,9 +1085,9 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
 
 		if (alc) {
 			ac = &alc->ac;
-			if (ac->avail && spin_trylock_irq(&ac->lock)) {
+			if (ac->avail && spin_trylock_irq(&alc->lock)) {
 				__drain_alien_cache(cachep, ac, node);
-				spin_unlock_irq(&ac->lock);
+				spin_unlock_irq(&alc->lock);
 			}
 		}
 	}
@@ -1106,9 +1105,9 @@ static void drain_alien_cache(struct kmem_cache *cachep,
 		alc = alien[i];
 		if (alc) {
 			ac = &alc->ac;
-			spin_lock_irqsave(&ac->lock, flags);
+			spin_lock_irqsave(&alc->lock, flags);
 			__drain_alien_cache(cachep, ac, i);
-			spin_unlock_irqrestore(&ac->lock, flags);
+			spin_unlock_irqrestore(&alc->lock, flags);
 		}
 	}
 }
@@ -1136,13 +1135,13 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 	if (n->alien && n->alien[nodeid]) {
 		alien = n->alien[nodeid];
 		ac = &alien->ac;
-		spin_lock(&ac->lock);
+		spin_lock(&alien->lock);
 		if (unlikely(ac->avail == ac->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
 			__drain_alien_cache(cachep, ac, nodeid);
 		}
 		ac_put_obj(cachep, ac, objp);
-		spin_unlock(&ac->lock);
+		spin_unlock(&alien->lock);
 	} else {
 		n = get_node(cachep, nodeid);
 		spin_lock(&n->list_lock);
@@ -1613,10 +1612,6 @@ void __init kmem_cache_init(void)
 
 		memcpy(ptr, cpu_cache_get(kmem_cache),
 		       sizeof(struct arraycache_init));
-		/*
-		 * Do not assume that spinlocks can be initialized via memcpy:
-		 */
-		spin_lock_init(&ptr->lock);
 
 		kmem_cache->array[smp_processor_id()] = ptr;
 
@@ -1626,10 +1621,6 @@ void __init kmem_cache_init(void)
 		       != &initarray_generic.cache);
 		memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]),
 		       sizeof(struct arraycache_init));
-		/*
-		 * Do not assume that spinlocks can be initialized via memcpy:
-		 */
-		spin_lock_init(&ptr->lock);
 
 		kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
 	}
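The last two hunks follow directly from removing the lock from struct array_cache. An illustrative note, not part of the patch, on why the deleted re-initialization was needed before and is unnecessary now:

/*
 * Before: array_cache embedded a spinlock_t, so copying a bootstrap
 * cache by value also copied raw lock state, which memcpy cannot
 * validly transfer; hence the mandatory spin_lock_init() afterwards.
 * Now: there is no lock inside array_cache, so the plain copy is safe.
 */
memcpy(ptr, cpu_cache_get(kmem_cache), sizeof(struct arraycache_init));
kmem_cache->array[smp_processor_id()] = ptr;	/* no lock repair needed */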