	page-flags: define PG_locked behavior on compound pages
lock_page() must operate on the whole compound page. It doesn't make much sense to lock part of a compound page. Change the code to use the head page's PG_locked if a tail page is passed.

This patch also gets rid of the custom helper functions __set_page_locked() and __clear_page_locked(). They are replaced with helpers generated by __SETPAGEFLAG/__CLEARPAGEFLAG. Passing a tail page to these helpers triggers VM_BUG_ON().

SLUB uses PG_locked as a bit spin lock. IIUC, tail pages should never appear there. VM_BUG_ON() is added to make sure that this assumption is correct.

[akpm@linux-foundation.org: fix fs/cifs/file.c]

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
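The "use the head page's PG_locked" behavior rests on the page-flag policies introduced in the parent commit (95ad97554a). A minimal sketch of the PF_NO_TAIL policy used below, assuming the macro shape that series introduced (the exact definition lives in include/linux/page-flags.h and may differ in detail):

/*
 * Sketch of the PF_NO_TAIL page-flag policy (from the parent commit).
 * Reads through a tail page are redirected to the head page; modifying
 * a flag through a tail page is a bug. 'enforce' is 1 for set/clear
 * operations and 0 for tests.
 */
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PAGE(enforce && PageTail(page), page);	\
		compound_head(page); })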
This commit is contained in:

parent 95ad97554a
commit 48c935ad88

11 changed files with 32 additions and 36 deletions
fs/cifs/file.c

@@ -3391,13 +3391,13 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
 	 * should have access to this page, we're safe to simply set
 	 * PG_locked without checking it first.
 	 */
-	__set_page_locked(page);
+	__SetPageLocked(page);
 	rc = add_to_page_cache_locked(page, mapping,
 				      page->index, gfp);
 
 	/* give up if we can't stick it in the cache */
 	if (rc) {
-		__clear_page_locked(page);
+		__ClearPageLocked(page);
 		return rc;
 	}
 
@@ -3418,9 +3418,9 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
 		if (*bytes + PAGE_CACHE_SIZE > rsize)
 			break;
 
-		__set_page_locked(page);
+		__SetPageLocked(page);
 		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
-			__clear_page_locked(page);
+			__ClearPageLocked(page);
 			break;
 		}
 		list_move_tail(&page->lru, tmplist);
include/linux/page-flags.h

@@ -256,7 +256,7 @@ static inline int __TestClearPage##uname(struct page *page) { return 0; }
 #define TESTSCFLAG_FALSE(uname)						\
 	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
 
-TESTPAGEFLAG(Locked, locked, PF_ANY)
+__PAGEFLAG(Locked, locked, PF_NO_TAIL)
 PAGEFLAG(Error, error, PF_ANY) TESTCLEARFLAG(Error, error, PF_ANY)
 PAGEFLAG(Referenced, referenced, PF_ANY) TESTCLEARFLAG(Referenced, referenced, PF_ANY)
 	__SETPAGEFLAG(Referenced, referenced, PF_ANY)
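With that policy, the new __PAGEFLAG(Locked, locked, PF_NO_TAIL) line generates the Locked family of helpers. Roughly, the expansion looks like this (a sketch, not the verbatim macro output):

/* Approximate expansion of __PAGEFLAG(Locked, locked, PF_NO_TAIL). */
static inline int PageLocked(struct page *page)
{
	/* test: a tail page is silently redirected to its head */
	return test_bit(PG_locked, &compound_head(page)->flags);
}

static inline void __SetPageLocked(struct page *page)
{
	/* non-atomic set: passing a tail page is a bug */
	VM_BUG_ON_PAGE(PageTail(page), page);
	__set_bit(PG_locked, &compound_head(page)->flags);
}

static inline void __ClearPageLocked(struct page *page)
{
	/* non-atomic clear: passing a tail page is a bug */
	VM_BUG_ON_PAGE(PageTail(page), page);
	__clear_bit(PG_locked, &compound_head(page)->flags);
}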
include/linux/pagemap.h

@@ -433,18 +433,9 @@ extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 				unsigned int flags);
 extern void unlock_page(struct page *page);
 
-static inline void __set_page_locked(struct page *page)
-{
-	__set_bit(PG_locked, &page->flags);
-}
-
-static inline void __clear_page_locked(struct page *page)
-{
-	__clear_bit(PG_locked, &page->flags);
-}
-
 static inline int trylock_page(struct page *page)
 {
+	page = compound_head(page);
 	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
 }
 
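For context, lock_page() itself needs no change: it is built entirely on trylock_page() and __lock_page() (patched here and in mm/filemap.c below), so locking a tail page now takes the head page's PG_locked. The wrapper, as it existed at the time:

/* Unchanged caller, shown for context (include/linux/pagemap.h). */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}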
@@ -497,9 +488,9 @@ extern int wait_on_page_bit_killable_timeout(struct page *page,
 
 static inline int wait_on_page_locked_killable(struct page *page)
 {
-	if (PageLocked(page))
-		return wait_on_page_bit_killable(page, PG_locked);
-	return 0;
+	if (!PageLocked(page))
+		return 0;
+	return wait_on_page_bit_killable(compound_head(page), PG_locked);
 }
 
 extern wait_queue_head_t *page_waitqueue(struct page *page);
@@ -518,7 +509,7 @@ static inline void wake_up_page(struct page *page, int bit)
 static inline void wait_on_page_locked(struct page *page)
 {
 	if (PageLocked(page))
-		wait_on_page_bit(page, PG_locked);
+		wait_on_page_bit(compound_head(page), PG_locked);
 }
 
 /*
@@ -664,17 +655,17 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
 
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run __set_page_locked() against it.
+ * the page is new, so we can just run __SetPageLocked() against it.
  */
 static inline int add_to_page_cache(struct page *page,
 		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
 {
 	int error;
 
-	__set_page_locked(page);
+	__SetPageLocked(page);
 	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
 	if (unlikely(error))
-		__clear_page_locked(page);
+		__ClearPageLocked(page);
 	return error;
 }
 

mm/filemap.c
@@ -682,11 +682,11 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 	void *shadow = NULL;
 	int ret;
 
-	__set_page_locked(page);
+	__SetPageLocked(page);
 	ret = __add_to_page_cache_locked(page, mapping, offset,
 					 gfp_mask, &shadow);
 	if (unlikely(ret))
-		__clear_page_locked(page);
+		__ClearPageLocked(page);
 	else {
 		/*
 		 * The page might have been evicted from cache only
@@ -809,6 +809,7 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue);
  */
 void unlock_page(struct page *page)
 {
+	page = compound_head(page);
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	clear_bit_unlock(PG_locked, &page->flags);
 	smp_mb__after_atomic();
@@ -873,18 +874,20 @@ EXPORT_SYMBOL_GPL(page_endio);
  */
 void __lock_page(struct page *page)
 {
-	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+	struct page *page_head = compound_head(page);
+	DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);
 
-	__wait_on_bit_lock(page_waitqueue(page), &wait, bit_wait_io,
+	__wait_on_bit_lock(page_waitqueue(page_head), &wait, bit_wait_io,
 							TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_page);
 
 int __lock_page_killable(struct page *page)
 {
-	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+	struct page *page_head = compound_head(page);
+	DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);
 
-	return __wait_on_bit_lock(page_waitqueue(page), &wait,
+	return __wait_on_bit_lock(page_waitqueue(page_head), &wait,
 				bit_wait_io, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);

mm/ksm.c
@@ -1899,7 +1899,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
 
 		SetPageDirty(new_page);
 		__SetPageUptodate(new_page);
-		__set_page_locked(new_page);
+		__SetPageLocked(new_page);
 	}
 
 	return new_page;
mm/memory-failure.c

@@ -1166,7 +1166,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 	/*
 	 * We ignore non-LRU pages for good reasons.
 	 * - PG_locked is only well defined for LRU pages and a few others
-	 * - to avoid races with __set_page_locked()
+	 * - to avoid races with __SetPageLocked()
 	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
 	 * The check (unnecessarily) ignores LRU pages being isolated and
 	 * walked by the page reclaim code, however that's not a big loss.
mm/migrate.c

@@ -1767,7 +1767,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 		flush_tlb_range(vma, mmun_start, mmun_end);
 
 	/* Prepare a page as a migration target */
-	__set_page_locked(new_page);
+	__SetPageLocked(new_page);
 	SetPageSwapBacked(new_page);
 
 	/* anon mapping, we can simply copy page->mapping to the new page: */
mm/shmem.c

@@ -1085,7 +1085,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 	copy_highpage(newpage, oldpage);
 	flush_dcache_page(newpage);
 
-	__set_page_locked(newpage);
+	__SetPageLocked(newpage);
 	SetPageUptodate(newpage);
 	SetPageSwapBacked(newpage);
 	set_page_private(newpage, swap_index);
@@ -1277,7 +1277,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		}
 
 		__SetPageSwapBacked(page);
-		__set_page_locked(page);
+		__SetPageLocked(page);
 		if (sgp == SGP_WRITE)
 			__SetPageReferenced(page);
 
mm/slub.c

@@ -338,11 +338,13 @@ static inline int oo_objects(struct kmem_cache_order_objects x)
  */
 static __always_inline void slab_lock(struct page *page)
 {
+	VM_BUG_ON_PAGE(PageTail(page), page);
 	bit_spin_lock(PG_locked, &page->flags);
 }
 
 static __always_inline void slab_unlock(struct page *page)
 {
+	VM_BUG_ON_PAGE(PageTail(page), page);
 	__bit_spin_unlock(PG_locked, &page->flags);
 }
 
mm/swap_state.c

@@ -353,7 +353,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		}
 
 		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
-		__set_page_locked(new_page);
+		__SetPageLocked(new_page);
 		SetPageSwapBacked(new_page);
 		err = __add_to_swap_cache(new_page, entry);
 		if (likely(!err)) {
@@ -367,7 +367,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		}
 		radix_tree_preload_end();
 		ClearPageSwapBacked(new_page);
-		__clear_page_locked(new_page);
+		__ClearPageLocked(new_page);
 		/*
 		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
 		 * clear SWAP_HAS_CACHE flag.
mm/vmscan.c

@@ -1184,7 +1184,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * we obviously don't have to worry about waking up a process
 		 * waiting on the page lock, because there are no references.
 		 */
-		__clear_page_locked(page);
+		__ClearPageLocked(page);
 free_it:
 		nr_reclaimed++;
 