mm: optimize page_lock_anon_vma() fast-path

Optimize the page_lock_anon_vma() fast path to be one atomic op, instead
of two.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
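For orientation before the diff: the old fast path cost two atomic operations, the atomic_inc_not_zero() inside page_get_anon_vma() plus the mutex acquisition in anon_vma_lock(); the new one is a single mutex_trylock(), with the refcount touched only on the slow path. What follows is a minimal userspace sketch of that pattern, not kernel code: struct obj and obj_lock() are hypothetical stand-ins, and C11 atomics plus pthreads stand in for the kernel's atomic_t and mutex.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int      refcount;   /* 0 means the object is being freed */
	pthread_mutex_t lock;
};

static bool obj_lock(struct obj *o)
{
	/*
	 * Fast path: one atomic op. A successful trylock pins the
	 * object, because the freeing side waits on the lock before
	 * releasing the memory (see the free-side sketch after the
	 * diff below).
	 */
	if (pthread_mutex_trylock(&o->lock) == 0) {
		if (atomic_load(&o->refcount) == 0) {
			/* Raced with the final put: back off. */
			pthread_mutex_unlock(&o->lock);
			return false;
		}
		return true;
	}

	/* Slow path: pin with a reference (inc-not-zero), then block. */
	int old = atomic_load(&o->refcount);
	do {
		if (old == 0)
			return false;   /* object already going away */
	} while (!atomic_compare_exchange_weak(&o->refcount, &old, old + 1));

	pthread_mutex_lock(&o->lock);
	/*
	 * Drop the pin again; the kernel version must additionally
	 * handle having just dropped the last reference here.
	 */
	atomic_fetch_sub(&o->refcount, 1);
	return true;
}

This is also why the patch removes put_anon_vma() from page_unlock_anon_vma(): the fast path never takes a reference, so unlocking becomes a bare mutex release.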
commit 88c22088bf
parent 2b575eb64f

1 changed file with 82 additions and 4 deletions:

 mm/rmap.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -86,6 +86,29 @@ static inline struct anon_vma *anon_vma_alloc(void)
 static inline void anon_vma_free(struct anon_vma *anon_vma)
 {
 	VM_BUG_ON(atomic_read(&anon_vma->refcount));
+
+	/*
+	 * Synchronize against page_lock_anon_vma() such that
+	 * we can safely hold the lock without the anon_vma getting
+	 * freed.
+	 *
+	 * Relies on the full mb implied by the atomic_dec_and_test() from
+	 * put_anon_vma() against the acquire barrier implied by
+	 * mutex_trylock() from page_lock_anon_vma(). This orders:
+	 *
+	 * page_lock_anon_vma()		VS	put_anon_vma()
+	 *   mutex_trylock()			  atomic_dec_and_test()
+	 *   LOCK				  MB
+	 *   atomic_read()			  mutex_is_locked()
+	 *
+	 * LOCK should suffice since the actual taking of the lock must
+	 * happen _before_ what follows.
+	 */
+	if (mutex_is_locked(&anon_vma->root->mutex)) {
+		anon_vma_lock(anon_vma);
+		anon_vma_unlock(anon_vma);
+	}
+
 	kmem_cache_free(anon_vma_cachep, anon_vma);
 }
 
@@ -372,20 +395,75 @@ struct anon_vma *page_get_anon_vma(struct page *page)
 	return anon_vma;
 }
 
+/*
+ * Similar to page_get_anon_vma() except it locks the anon_vma.
+ *
+ * Its a little more complex as it tries to keep the fast path to a single
+ * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
+ * reference like with page_get_anon_vma() and then block on the mutex.
+ */
 struct anon_vma *page_lock_anon_vma(struct page *page)
 {
-	struct anon_vma *anon_vma = page_get_anon_vma(page);
+	struct anon_vma *anon_vma = NULL;
+	unsigned long anon_mapping;
 
-	if (anon_vma)
-		anon_vma_lock(anon_vma);
+	rcu_read_lock();
+	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
+	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
+		goto out;
+	if (!page_mapped(page))
+		goto out;
 
+	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
+	if (mutex_trylock(&anon_vma->root->mutex)) {
+		/*
+		 * If we observe a !0 refcount, then holding the lock ensures
+		 * the anon_vma will not go away, see __put_anon_vma().
+		 */
+		if (!atomic_read(&anon_vma->refcount)) {
+			anon_vma_unlock(anon_vma);
+			anon_vma = NULL;
+		}
+		goto out;
+	}
+
+	/* trylock failed, we got to sleep */
+	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
+		anon_vma = NULL;
+		goto out;
+	}
+
+	if (!page_mapped(page)) {
+		put_anon_vma(anon_vma);
+		anon_vma = NULL;
+		goto out;
+	}
+
+	/* we pinned the anon_vma, its safe to sleep */
+	rcu_read_unlock();
+	anon_vma_lock(anon_vma);
+
+	if (atomic_dec_and_test(&anon_vma->refcount)) {
+		/*
+		 * Oops, we held the last refcount, release the lock
+		 * and bail -- can't simply use put_anon_vma() because
+		 * we'll deadlock on the anon_vma_lock() recursion.
+		 */
+		anon_vma_unlock(anon_vma);
+		__put_anon_vma(anon_vma);
+		anon_vma = NULL;
+	}
+
+	return anon_vma;
+
+out:
+	rcu_read_unlock();
 	return anon_vma;
 }
 
 void page_unlock_anon_vma(struct anon_vma *anon_vma)
 {
 	anon_vma_unlock(anon_vma);
-	put_anon_vma(anon_vma);
 }
 
 /*
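The subtle half of the scheme is the new block in anon_vma_free() above: once the refcount has dropped to zero, the freer can still race with a reader whose trylock succeeded just before the count fell, so it briefly takes and releases the mutex to wait that reader out before freeing. Continuing the hypothetical userspace sketch from above (same illustrative struct obj; note the kernel version short-circuits with mutex_is_locked() so the uncontended case stays a plain read):

#include <stdlib.h>

/* Called once the refcount has reached zero. */
static void obj_free(struct obj *o)
{
	/*
	 * A reader that won the trylock while the refcount was still
	 * nonzero may legitimately hold the lock right now; acquiring
	 * and releasing it here drains that reader before the free.
	 */
	pthread_mutex_lock(&o->lock);
	pthread_mutex_unlock(&o->lock);

	pthread_mutex_destroy(&o->lock);
	free(o);
}

The memory-barrier commentary in the patch is what justifies the kernel's cheaper mutex_is_locked() check; the unconditional lock/unlock above trades that ordering argument for simplicity.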