forked from mirrors/linux
		
	slub: Acquire_slab() avoid loop
Avoid the loop in acquire_slab() and simply fail if there is a conflict. This will cause the next page on the list to be considered. Acked-by: David Rientjes <rientjes@google.com> Signed-off-by: Christoph Lameter <cl@linux.com> Signed-off-by: Pekka Enberg <penberg@kernel.org>
This commit is contained in:
		
							parent
							
								
									507effeaba
								
							
						
					
					
						commit
						7ced371971
					
				
					 1 changed file with 15 additions and 13 deletions
				
			
		
							
								
								
									
										28
									
								
								mm/slub.c
									
									
									
									
									
								
							
							
						
						
									
										28
									
								
								mm/slub.c
									
									
									
									
									
								
							| 
						 | 
					@ -1490,12 +1490,12 @@ static inline void remove_partial(struct kmem_cache_node *n,
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
/*
 | 
					/*
 | 
				
			||||||
 * Lock slab, remove from the partial list and put the object into the
 | 
					 * Remove slab from the partial list, freeze it and
 | 
				
			||||||
 * per cpu freelist.
 | 
					 * return the pointer to the freelist.
 | 
				
			||||||
 *
 | 
					 *
 | 
				
			||||||
 * Returns a list of objects or NULL if it fails.
 | 
					 * Returns a list of objects or NULL if it fails.
 | 
				
			||||||
 *
 | 
					 *
 | 
				
			||||||
 * Must hold list_lock.
 | 
					 * Must hold list_lock since we modify the partial list.
 | 
				
			||||||
 */
 | 
					 */
 | 
				
			||||||
static inline void *acquire_slab(struct kmem_cache *s,
 | 
					static inline void *acquire_slab(struct kmem_cache *s,
 | 
				
			||||||
		struct kmem_cache_node *n, struct page *page,
 | 
							struct kmem_cache_node *n, struct page *page,
 | 
				
			||||||
| 
						 | 
					@ -1510,22 +1510,24 @@ static inline void *acquire_slab(struct kmem_cache *s,
 | 
				
			||||||
	 * The old freelist is the list of objects for the
 | 
						 * The old freelist is the list of objects for the
 | 
				
			||||||
	 * per cpu allocation list.
 | 
						 * per cpu allocation list.
 | 
				
			||||||
	 */
 | 
						 */
 | 
				
			||||||
	do {
 | 
						freelist = page->freelist;
 | 
				
			||||||
		freelist = page->freelist;
 | 
						counters = page->counters;
 | 
				
			||||||
		counters = page->counters;
 | 
						new.counters = counters;
 | 
				
			||||||
		new.counters = counters;
 | 
						if (mode)
 | 
				
			||||||
		if (mode)
 | 
							new.inuse = page->objects;
 | 
				
			||||||
			new.inuse = page->objects;
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
		VM_BUG_ON(new.frozen);
 | 
						VM_BUG_ON(new.frozen);
 | 
				
			||||||
		new.frozen = 1;
 | 
						new.frozen = 1;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	} while (!__cmpxchg_double_slab(s, page,
 | 
						if (!__cmpxchg_double_slab(s, page,
 | 
				
			||||||
			freelist, counters,
 | 
								freelist, counters,
 | 
				
			||||||
			NULL, new.counters,
 | 
								NULL, new.counters,
 | 
				
			||||||
			"lock and freeze"));
 | 
								"acquire_slab"))
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
							return NULL;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	remove_partial(n, page);
 | 
						remove_partial(n, page);
 | 
				
			||||||
 | 
						WARN_ON(!freelist);
 | 
				
			||||||
	return freelist;
 | 
						return freelist;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue