forked from mirrors/linux

dax: use sb_issue_zeroout instead of calling dax_clear_sectors

dax_clear_sectors() cannot handle poisoned blocks. These must be zeroed
using the BIO interface instead. Convert ext2 and XFS to use only
sb_issue_zeroout().

Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
[vishal: Also remove the dax_clear_sectors function entirely]
Signed-off-by: Vishal Verma <vishal.l.verma@intel.com>
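For reference, sb_issue_zeroout() is a thin super_block wrapper around
blkdev_issue_zeroout(), the BIO-based zeroing path the message above refers
to. A minimal sketch of that wrapper, assuming the five-argument
blkdev_issue_zeroout() of this kernel generation (the XFS hunk below
open-codes the same shift-and-call pattern against the inode's block device):

/*
 * Sketch of the sb_issue_zeroout() helper from <linux/blkdev.h>:
 * convert filesystem blocks to 512-byte sectors and let the block
 * layer decide on the fastest method of implementing the zeroing.
 */
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, true);	/* true: discard-based zeroing allowed */
}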
commit 3dc2916107
parent 0a70bd4305

4 changed files with 8 additions and 48 deletions

fs/dax.c | 32
fs/dax.c

@@ -87,38 +87,6 @@ struct page *read_dax_sector(struct block_device *bdev, sector_t n)
 	return page;
 }
 
-/*
- * dax_clear_sectors() is called from within transaction context from XFS,
- * and hence this means the stack from this point must follow GFP_NOFS
- * semantics for all operations.
- */
-int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size)
-{
-	struct blk_dax_ctl dax = {
-		.sector = _sector,
-		.size = _size,
-	};
-
-	might_sleep();
-	do {
-		long count, sz;
-
-		count = dax_map_atomic(bdev, &dax);
-		if (count < 0)
-			return count;
-		sz = min_t(long, count, SZ_128K);
-		clear_pmem(dax.addr, sz);
-		dax.size -= sz;
-		dax.sector += sz / 512;
-		dax_unmap_atomic(bdev, &dax);
-		cond_resched();
-	} while (dax.size);
-
-	wmb_pmem();
-	return 0;
-}
-EXPORT_SYMBOL_GPL(dax_clear_sectors);
-
 static bool buffer_written(struct buffer_head *bh)
 {
 	return buffer_mapped(bh) && !buffer_unwritten(bh);
fs/ext2/inode.c

@@ -26,6 +26,7 @@
 #include <linux/highuid.h>
 #include <linux/pagemap.h>
 #include <linux/dax.h>
+#include <linux/blkdev.h>
 #include <linux/quotaops.h>
 #include <linux/writeback.h>
 #include <linux/buffer_head.h>
@@ -737,10 +738,9 @@ static int ext2_get_blocks(struct inode *inode,
 		 * so that it's not found by another thread before it's
 		 * initialised
 		 */
-		err = dax_clear_sectors(inode->i_sb->s_bdev,
-				le32_to_cpu(chain[depth-1].key) <<
-				(inode->i_blkbits - 9),
-				count << inode->i_blkbits);
+		err = sb_issue_zeroout(inode->i_sb,
+				le32_to_cpu(chain[depth-1].key), count,
+				GFP_NOFS);
 		if (err) {
 			mutex_unlock(&ei->truncate_mutex);
 			goto cleanup;
fs/xfs/xfs_bmap_util.c

@@ -72,18 +72,11 @@ xfs_zero_extent(
 	struct xfs_mount *mp = ip->i_mount;
 	xfs_daddr_t	sector = xfs_fsb_to_db(ip, start_fsb);
 	sector_t	block = XFS_BB_TO_FSBT(mp, sector);
-	ssize_t		size = XFS_FSB_TO_B(mp, count_fsb);
-
-	if (IS_DAX(VFS_I(ip)))
-		return dax_clear_sectors(xfs_find_bdev_for_inode(VFS_I(ip)),
-				sector, size);
-
-	/*
-	 * let the block layer decide on the fastest method of
-	 * implementing the zeroing.
-	 */
-	return sb_issue_zeroout(mp->m_super, block, count_fsb, GFP_NOFS);
 
+	return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
+		block << (mp->m_super->s_blocksize_bits - 9),
+		count_fsb << (mp->m_super->s_blocksize_bits - 9),
+		GFP_NOFS, true);
 }
 
 /*
include/linux/dax.h

@@ -7,7 +7,6 @@
 
 ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
 		  get_block_t, dio_iodone_t, int flags);
-int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size);
 int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
 int dax_truncate_page(struct inode *, loff_t from, get_block_t);
 int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);