	fs: Convert __set_page_dirty_buffers to block_dirty_folio
Convert all callers; mostly this is just changing the aops to point at it,
but a few implementations need a little more work.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs
This commit is contained in:
parent af7afdc7bb
commit e621900ad2
32 changed files with 76 additions and 85 deletions
@@ -429,7 +429,7 @@ static int blkdev_writepages(struct address_space *mapping,
 }
 
 const struct address_space_operations def_blk_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage	= blkdev_readpage,
 	.readahead	= blkdev_readahead,
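The hunk above shows the shape of the change most of this commit repeats: the aops entry stops pointing .set_page_dirty at __set_page_dirty_buffers and instead points .dirty_folio at block_dirty_folio. A minimal sketch of that pattern for a hypothetical buffer_head-based filesystem (the foo_aops name is illustrative, not part of this commit):

#include <linux/fs.h>
#include <linux/buffer_head.h>	/* block_dirty_folio(), block_invalidate_folio() */

static const struct address_space_operations foo_aops = {
	/* was: .set_page_dirty = __set_page_dirty_buffers, */
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	/* the filesystem's own readpage/writepage/etc. entries are unchanged */
};

Filesystems whose old .set_page_dirty was exactly __set_page_dirty_buffers need nothing more than this; the few with their own dirtying hook (ext4, gfs2, reiserfs below) keep a wrapper and are converted separately.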
@@ -73,7 +73,7 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
 }
 
 static const struct address_space_operations adfs_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage	= adfs_readpage,
 	.writepage	= adfs_writepage,
@@ -453,7 +453,7 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
 }
 
 const struct address_space_operations affs_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage = affs_readpage,
 	.writepage = affs_writepage,
@@ -835,7 +835,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
 }
 
 const struct address_space_operations affs_aops_ofs = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage = affs_readpage_ofs,
 	//.writepage = affs_writepage_ofs,
@@ -188,7 +188,7 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
 }
 
 const struct address_space_operations bfs_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage	= bfs_readpage,
 	.writepage	= bfs_writepage,
fs/buffer.c | 33
@@ -613,17 +613,14 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
  * address_space though.
  */
-int __set_page_dirty_buffers(struct page *page)
+bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
 {
-	int newly_dirty;
-	struct address_space *mapping = page_mapping(page);
-
-	if (unlikely(!mapping))
-		return !TestSetPageDirty(page);
+	struct buffer_head *head;
+	bool newly_dirty;
 
 	spin_lock(&mapping->private_lock);
-	if (page_has_buffers(page)) {
-		struct buffer_head *head = page_buffers(page);
+	head = folio_buffers(folio);
+	if (head) {
 		struct buffer_head *bh = head;
 
 		do {
@@ -635,21 +632,21 @@ int __set_page_dirty_buffers(struct page *page)
 	 * Lock out page's memcg migration to keep PageDirty
 	 * synchronized with per-memcg dirty page counters.
 	 */
-	lock_page_memcg(page);
-	newly_dirty = !TestSetPageDirty(page);
+	folio_memcg_lock(folio);
+	newly_dirty = !folio_test_set_dirty(folio);
 	spin_unlock(&mapping->private_lock);
 
 	if (newly_dirty)
-		__set_page_dirty(page, mapping, 1);
+		__folio_mark_dirty(folio, mapping, 1);
 
-	unlock_page_memcg(page);
+	folio_memcg_unlock(folio);
 
 	if (newly_dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
 	return newly_dirty;
 }
-EXPORT_SYMBOL(__set_page_dirty_buffers);
+EXPORT_SYMBOL(block_dirty_folio);
 
 /*
  * Write out and wait upon a list of buffers.
@@ -1548,7 +1545,7 @@ EXPORT_SYMBOL(block_invalidate_folio);
 
 /*
  * We attach and possibly dirty the buffers atomically wrt
- * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
+ * block_dirty_folio() via private_lock.  try_to_free_buffers
  * is already excluded via the page lock.
  */
 void create_empty_buffers(struct page *page,
@@ -1723,12 +1720,12 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 					(1 << BH_Dirty)|(1 << BH_Uptodate));
 
 	/*
-	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
+	 * Be very careful.  We have no exclusion from block_dirty_folio
 	 * here, and the (potentially unmapped) buffers may become dirty at
 	 * any time.  If a buffer becomes dirty here after we've inspected it
 	 * then we just miss that fact, and the page stays dirty.
 	 *
-	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
+	 * Buffers outside i_size may be dirtied by block_dirty_folio;
	 * handle that here by just cleaning them.
 	 */
 
@@ -3182,7 +3179,7 @@ EXPORT_SYMBOL(sync_dirty_buffer);
  *
  * The same applies to regular filesystem pages: if all the buffers are
  * clean then we set the page clean and proceed.  To do that, we require
- * total exclusion from __set_page_dirty_buffers().  That is obtained with
+ * total exclusion from block_dirty_folio().  That is obtained with
  * private_lock.
  *
  * try_to_free_buffers() is non-blocking.
@@ -3249,7 +3246,7 @@ int try_to_free_buffers(struct page *page)
 	 * the page also.
 	 *
 	 * private_lock must be held over this entire operation in order
-	 * to synchronise against __set_page_dirty_buffers and prevent the
+	 * to synchronise against block_dirty_folio and prevent the
 	 * dirty bit from being lost.
 	 */
 	if (ret)
@@ -545,7 +545,7 @@ const struct address_space_operations ecryptfs_aops = {
 	 * feedback.
 	 */
 #ifdef CONFIG_BLOCK
-	.set_page_dirty = __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 #endif
 	.writepage = ecryptfs_writepage,
@@ -490,7 +490,7 @@ int exfat_block_truncate_page(struct inode *inode, loff_t from)
 }
 
 static const struct address_space_operations exfat_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage	= exfat_readpage,
 	.readahead	= exfat_readahead,
@@ -967,7 +967,7 @@ ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc
 }
 
 const struct address_space_operations ext2_aops = {
-	.set_page_dirty		= __set_page_dirty_buffers,
+	.dirty_folio		= block_dirty_folio,
 	.invalidate_folio	= block_invalidate_folio,
 	.readpage		= ext2_readpage,
 	.readahead		= ext2_readahead,
@@ -983,7 +983,7 @@ const struct address_space_operations ext2_aops = {
 };
 
 const struct address_space_operations ext2_nobh_aops = {
-	.set_page_dirty		= __set_page_dirty_buffers,
+	.dirty_folio		= block_dirty_folio,
 	.invalidate_folio	= block_invalidate_folio,
 	.readpage		= ext2_readpage,
 	.readahead		= ext2_readahead,
@@ -3560,11 +3560,11 @@ static bool ext4_journalled_dirty_folio(struct address_space *mapping,
 	return filemap_dirty_folio(mapping, folio);
 }
 
-static int ext4_set_page_dirty(struct page *page)
+static bool ext4_dirty_folio(struct address_space *mapping, struct folio *folio)
 {
-	WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page));
-	WARN_ON_ONCE(!page_has_buffers(page));
-	return __set_page_dirty_buffers(page);
+	WARN_ON_ONCE(!folio_test_locked(folio) && !folio_test_dirty(folio));
+	WARN_ON_ONCE(!folio_buffers(folio));
+	return block_dirty_folio(mapping, folio);
 }
 
 static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
@@ -3581,7 +3581,7 @@ static const struct address_space_operations ext4_aops = {
 	.writepages		= ext4_writepages,
 	.write_begin		= ext4_write_begin,
 	.write_end		= ext4_write_end,
-	.set_page_dirty		= ext4_set_page_dirty,
+	.dirty_folio		= ext4_dirty_folio,
 	.bmap			= ext4_bmap,
 	.invalidate_folio	= ext4_invalidate_folio,
 	.releasepage		= ext4_releasepage,
@@ -3616,7 +3616,7 @@ static const struct address_space_operations ext4_da_aops = {
 	.writepages		= ext4_writepages,
 	.write_begin		= ext4_da_write_begin,
 	.write_end		= ext4_da_write_end,
-	.set_page_dirty		= ext4_set_page_dirty,
+	.dirty_folio		= ext4_dirty_folio,
 	.bmap			= ext4_bmap,
 	.invalidate_folio	= ext4_invalidate_folio,
 	.releasepage		= ext4_releasepage,
@@ -342,7 +342,7 @@ int fat_block_truncate_page(struct inode *inode, loff_t from)
 }
 
 static const struct address_space_operations fat_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage	= fat_readpage,
 	.readahead	= fat_readahead,
@@ -606,18 +606,12 @@ void adjust_fs_space(struct inode *inode)
 	gfs2_trans_end(sdp);
 }
 
-/**
- * jdata_set_page_dirty - Page dirtying function
- * @page: The page to dirty
- *
- * Returns: 1 if it dirtyed the page, or 0 otherwise
- */
-
-static int jdata_set_page_dirty(struct page *page)
+static bool jdata_dirty_folio(struct address_space *mapping,
+		struct folio *folio)
 {
 	if (current->journal_info)
-		SetPageChecked(page);
-	return __set_page_dirty_buffers(page);
+		folio_set_checked(folio);
+	return block_dirty_folio(mapping, folio);
 }
 
 /**
@@ -795,7 +789,7 @@ static const struct address_space_operations gfs2_jdata_aops = {
 	.writepages = gfs2_jdata_writepages,
 	.readpage = gfs2_readpage,
 	.readahead = gfs2_readahead,
-	.set_page_dirty = jdata_set_page_dirty,
+	.dirty_folio = jdata_dirty_folio,
 	.bmap = gfs2_bmap,
 	.invalidate_folio = gfs2_invalidate_folio,
 	.releasepage = gfs2_releasepage,
@@ -89,14 +89,14 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
 }
 
 const struct address_space_operations gfs2_meta_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.writepage = gfs2_aspace_writepage,
 	.releasepage = gfs2_releasepage,
 };
 
 const struct address_space_operations gfs2_rgrp_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.writepage = gfs2_aspace_writepage,
 	.releasepage = gfs2_releasepage,
@@ -159,7 +159,7 @@ static int hfs_writepages(struct address_space *mapping,
 }
 
 const struct address_space_operations hfs_btree_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage	= hfs_readpage,
 	.writepage	= hfs_writepage,
@@ -170,7 +170,7 @@ const struct address_space_operations hfs_btree_aops = {
 };
 
 const struct address_space_operations hfs_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage	= hfs_readpage,
 	.writepage	= hfs_writepage,
@@ -156,7 +156,7 @@ static int hfsplus_writepages(struct address_space *mapping,
 }
 
 const struct address_space_operations hfsplus_btree_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage	= hfsplus_readpage,
 	.writepage	= hfsplus_writepage,
@@ -167,7 +167,7 @@ const struct address_space_operations hfsplus_btree_aops = {
 };
 
 const struct address_space_operations hfsplus_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage	= hfsplus_readpage,
 	.writepage	= hfsplus_writepage,
@@ -245,7 +245,7 @@ static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 }
 
 const struct address_space_operations hpfs_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage = hpfs_readpage,
 	.writepage = hpfs_writepage,
@@ -357,7 +357,7 @@ static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 }
 
 const struct address_space_operations jfs_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage	= jfs_readpage,
 	.readahead	= jfs_readahead,
@@ -442,7 +442,7 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block)
 }
 
 static const struct address_space_operations minix_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage = minix_readpage,
 	.writepage = minix_writepage,
@@ -504,7 +504,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
 			if (!buffer_mapped(bh)) {
 				/*
 				 * unmapped dirty buffers are created by
-				 * __set_page_dirty_buffers -> mmapped data
+				 * block_dirty_folio -> mmapped data
 				 */
 				if (buffer_dirty(bh))
 					goto confused;
@@ -434,7 +434,7 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
 
 
 static const struct address_space_operations def_mdt_aops = {
-	.set_page_dirty		= __set_page_dirty_buffers,
+	.dirty_folio		= block_dirty_folio,
 	.invalidate_folio	= block_invalidate_folio,
 	.writepage		= nilfs_mdt_write_page,
 };
@@ -593,12 +593,12 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
 	iblock = initialized_size >> blocksize_bits;
 
 	/*
-	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
+	 * Be very careful.  We have no exclusion from block_dirty_folio
 	 * here, and the (potentially unmapped) buffers may become dirty at
 	 * any time.  If a buffer becomes dirty here after we've inspected it
 	 * then we just miss that fact, and the page stays dirty.
 	 *
-	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
+	 * Buffers outside i_size may be dirtied by block_dirty_folio;
 	 * handle that here by just cleaning them.
 	 */
 
@@ -653,7 +653,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
 				// Update initialized size in the attribute and
 				// in the inode.
 				// Again, for each page do:
-				//	__set_page_dirty_buffers();
+				//	block_dirty_folio();
 				// put_page()
 				// We don't need to wait on the writes.
 				// Update iblock.
@@ -1654,7 +1654,7 @@ const struct address_space_operations ntfs_normal_aops = {
 	.readpage	= ntfs_readpage,
 #ifdef NTFS_RW
 	.writepage	= ntfs_writepage,
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 #endif /* NTFS_RW */
 	.bmap		= ntfs_bmap,
 	.migratepage	= buffer_migrate_page,
@@ -1669,7 +1669,7 @@ const struct address_space_operations ntfs_compressed_aops = {
 	.readpage	= ntfs_readpage,
 #ifdef NTFS_RW
 	.writepage	= ntfs_writepage,
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 #endif /* NTFS_RW */
 	.migratepage	= buffer_migrate_page,
 	.is_partially_uptodate = block_is_partially_uptodate,
@@ -1746,7 +1746,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
 		set_buffer_dirty(bh);
 	} while ((bh = bh->b_this_page) != head);
 	spin_unlock(&mapping->private_lock);
-	__set_page_dirty_nobuffers(page);
+	block_dirty_folio(mapping, page_folio(page));
 	if (unlikely(buffers_to_free)) {
 		do {
 			bh = buffers_to_free->b_this_page;
@@ -1950,7 +1950,7 @@ const struct address_space_operations ntfs_aops = {
 	.write_end	= ntfs_write_end,
 	.direct_IO	= ntfs_direct_IO,
 	.bmap		= ntfs_bmap,
-	.set_page_dirty = __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 };
 
 const struct address_space_operations ntfs_aops_cmpr = {
@@ -2453,7 +2453,7 @@ static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 }
 
 const struct address_space_operations ocfs2_aops = {
-	.set_page_dirty		= __set_page_dirty_buffers,
+	.dirty_folio		= block_dirty_folio,
 	.readpage		= ocfs2_readpage,
 	.readahead		= ocfs2_readahead,
 	.writepage		= ocfs2_writepage,
@@ -372,7 +372,7 @@ const struct inode_operations omfs_file_inops = {
 };
 
 const struct address_space_operations omfs_aops = {
-	.set_page_dirty = __set_page_dirty_buffers,
+	.dirty_folio = block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage = omfs_readpage,
 	.readahead = omfs_readahead,
@@ -3201,14 +3201,14 @@ static void reiserfs_invalidate_folio(struct folio *folio, size_t offset,
 	return;
 }
 
-static int reiserfs_set_page_dirty(struct page *page)
+static bool reiserfs_dirty_folio(struct address_space *mapping,
+		struct folio *folio)
 {
-	struct inode *inode = page->mapping->host;
-	if (reiserfs_file_data_log(inode)) {
-		SetPageChecked(page);
-		return __set_page_dirty_nobuffers(page);
+	if (reiserfs_file_data_log(mapping->host)) {
+		folio_set_checked(folio);
+		return filemap_dirty_folio(mapping, folio);
 	}
-	return __set_page_dirty_buffers(page);
+	return block_dirty_folio(mapping, folio);
 }
 
 /*
@@ -3435,5 +3435,5 @@ const struct address_space_operations reiserfs_address_space_operations = {
 	.write_end = reiserfs_write_end,
 	.bmap = reiserfs_aop_bmap,
 	.direct_IO = reiserfs_direct_IO,
-	.set_page_dirty = reiserfs_set_page_dirty,
+	.dirty_folio = reiserfs_dirty_folio,
 };
@@ -495,7 +495,7 @@ static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
 }
 
 const struct address_space_operations sysv_aops = {
-	.set_page_dirty = __set_page_dirty_buffers,
+	.dirty_folio = block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage = sysv_readpage,
 	.writepage = sysv_writepage,
@@ -125,7 +125,7 @@ static int udf_adinicb_write_end(struct file *file, struct address_space *mappin
 }
 
 const struct address_space_operations udf_adinicb_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage	= udf_adinicb_readpage,
 	.writepage	= udf_adinicb_writepage,
@@ -235,7 +235,7 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block)
 }
 
 const struct address_space_operations udf_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage	= udf_readpage,
 	.readahead	= udf_readahead,
@@ -526,7 +526,7 @@ static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
 }
 
 const struct address_space_operations ufs_aops = {
-	.set_page_dirty = __set_page_dirty_buffers,
+	.dirty_folio = block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage = ufs_readpage,
 	.writepage = ufs_writepage,
@@ -397,7 +397,7 @@ __bread(struct block_device *bdev, sector_t block, unsigned size)
 	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
 }
 
-extern int __set_page_dirty_buffers(struct page *page);
+bool block_dirty_folio(struct address_space *mapping, struct folio *folio);
 
 #else /* CONFIG_BLOCK */
 
@@ -72,7 +72,7 @@
  * Lock ordering:
  *
  *  ->i_mmap_rwsem		(truncate_pagecache)
- *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
+ *    ->private_lock		(__free_pte->block_dirty_folio)
  *      ->swap_lock		(exclusive_swap_page, others)
  *        ->i_pages lock
  *
@@ -115,7 +115,7 @@
  *    ->memcg->move_lock	(page_remove_rmap->lock_page_memcg)
  *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
  *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
- *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
+ *    ->private_lock		(zap_pte_range->block_dirty_folio)
  *
 * ->i_mmap_rwsem
 *   ->tasklist_lock            (memory_failure, collect_procs_ao)
@@ -2530,7 +2530,7 @@ void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
  * This is also sometimes used by filesystems which use buffer_heads when
  * a single buffer is being dirtied: we want to set the folio dirty in
  * that case, but not all the buffers.  This is a "bottom-up" dirtying,
- * whereas __set_page_dirty_buffers() is a "top-down" dirtying.
+ * whereas block_dirty_folio() is a "top-down" dirtying.
  *
  * The caller must ensure this doesn't race with truncation.  Most will
  * simply hold the folio lock, but e.g. zap_pte_range() calls with the
@@ -31,8 +31,8 @@
  *               mm->page_table_lock or pte_lock
  *                 swap_lock (in swap_duplicate, swap_info_get)
  *                   mmlist_lock (in mmput, drain_mmlist and others)
- *                   mapping->private_lock (in __set_page_dirty_buffers)
- *                     lock_page_memcg move_lock (in __set_page_dirty_buffers)
+ *                   mapping->private_lock (in block_dirty_folio)
+ *                     folio_lock_memcg move_lock (in block_dirty_folio)
  *                       i_pages lock (widely used)
  *                         lruvec->lru_lock (in folio_lruvec_lock_irq)
  *                   inode->i_lock (in set_page_dirty's __mark_inode_dirty)