Merge tag 'for-4.14/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:

 - Some request-based DM core and DM multipath fixes and cleanups

 - Constify a few variables in DM core and DM integrity

 - Add bufio optimization and checksum failure accounting to DM integrity

 - Fix DM integrity to avoid checking integrity of failed reads

 - Fix DM integrity to use init_completion

 - A couple DM log-writes target fixes

 - Simplify DAX flushing by eliminating the unnecessary flush abstraction
   that was stood up for DM's use.

* tag 'for-4.14/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dax: remove the pmem_dax_ops->flush abstraction
  dm integrity: use init_completion instead of COMPLETION_INITIALIZER_ONSTACK
  dm integrity: make blk_integrity_profile structure const
  dm integrity: do not check integrity for failed read operations
  dm log writes: fix >512b sectorsize support
  dm log writes: don't use all the cpu while waiting to log blocks
  dm ioctl: constify ioctl lookup table
  dm: constify argument arrays
  dm integrity: count and display checksum failures
  dm integrity: optimize writing dm-bufio buffers that are partially changed
  dm rq: do not update rq partially in each ending bio
  dm rq: make dm-sq requeuing behavior consistent with dm-mq behavior
  dm mpath: complain about unsupported __multipath_map_bio() return values
  dm mpath: avoid that building with W=1 causes gcc 7 to complain about fall-through
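The last bullet is the one that touches the header shown below: before this series, struct dax_operations carried a ->flush method (the "flush abstraction" named in the shortlog) that every DAX provider had to supply just to write CPU cachelines back. The following is a rough sketch of the simplified shape, assuming the generic dax_flush() helper now performs the write-back itself through the architecture primitive arch_wb_cache_pmem() where CONFIG_ARCH_HAS_PMEM_API is selected; it illustrates the idea and is not the exact upstream change.

#include <linux/dax.h>
#include <linux/libnvdimm.h>	/* arch_wb_cache_pmem() behind CONFIG_ARCH_HAS_PMEM_API */

#ifdef CONFIG_ARCH_HAS_PMEM_API
/* Write the dirty cachelines covering [addr, addr + size) back to media. */
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
	if (unlikely(!dax_alive(dax_dev)))
		return;

	arch_wb_cache_pmem(addr, size);
}
#else
/* No architecture write-back primitive: flushing degrades to a no-op. */
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif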
		
			
				
	
	
		
include/linux/dax.h (124 lines, 3.5 KiB, C)
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <asm/pgtable.h>

struct iomap_ops;
struct dax_device;
struct dax_operations {
	/*
	 * direct_access: translate a device-relative
	 * logical-page-offset into an absolute physical pfn. Return the
	 * number of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			void **, pfn_t *);
	/* copy_from_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
};

extern struct attribute_group dax_attribute_group;

#if IS_ENABLED(CONFIG_DAX)
struct dax_device *dax_get_by_host(const char *host);
void put_dax(struct dax_device *dax_dev);
#else
static inline struct dax_device *dax_get_by_host(const char *host)
{
	return NULL;
}

static inline void put_dax(struct dax_device *dax_dev)
{
}
#endif

int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
int __bdev_dax_supported(struct super_block *sb, int blocksize);
static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
{
	return __bdev_dax_supported(sb, blocksize);
}

static inline struct dax_device *fs_dax_get_by_host(const char *host)
{
	return dax_get_by_host(host);
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
	put_dax(dax_dev);
}

struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
#else
static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
{
	return -EOPNOTSUPP;
}

static inline struct dax_device *fs_dax_get_by_host(const char *host)
{
	return NULL;
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
}

static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	return NULL;
}
#endif

int dax_read_lock(void);
void dax_read_unlock(int id);
struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops);
bool dax_alive(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);

ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    const struct iomap_ops *ops);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index);

#ifdef CONFIG_FS_DAX
int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int length);
#else
static inline int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	return -ENXIO;
}
#endif

static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}

struct writeback_control;
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc);
#endif
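For reference, the two methods left in struct dax_operations above are all a provider has to supply after this series. Below is a hedged sketch of how a hypothetical driver exposing one flat physical memory range might wire them up and register with alloc_dax(); struct my_dev and all my_* names are invented for illustration, and a real persistent-memory driver would use a flushing copy variant (such as copy_from_iter_flushcache()) rather than the plain copy_from_iter() shown here.

#include <linux/dax.h>
#include <linux/kernel.h>
#include <linux/pfn_t.h>
#include <linux/uio.h>

/* Hypothetical driver state; not part of the kernel API. */
struct my_dev {
	phys_addr_t phys_base;		/* start of the device's memory range */
	void *virt_base;		/* kernel mapping of that range */
	unsigned long nr_pages;		/* size of the range in pages */
};

static long my_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct my_dev *d = dax_get_private(dax_dev);

	if (pgoff >= d->nr_pages)
		return -ERANGE;

	*kaddr = d->virt_base + PFN_PHYS(pgoff);
	*pfn = phys_to_pfn_t(d->phys_base + PFN_PHYS(pgoff),
			     PFN_DEV | PFN_MAP);
	/* report how many pages are usable for DAX starting at pgoff */
	return min_t(long, nr_pages, d->nr_pages - pgoff);
}

static size_t my_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	/* a real pmem driver would use a cache-flushing copy here */
	return copy_from_iter(addr, bytes, i);
}

static const struct dax_operations my_dax_ops = {
	.direct_access	= my_dax_direct_access,
	.copy_from_iter	= my_dax_copy_from_iter,
};

static struct dax_device *my_dax_register(struct my_dev *d, const char *name)
{
	/* tie the private data and ops table to a new dax_device */
	return alloc_dax(d, name, &my_dax_ops);
}

static void my_dax_unregister(struct dax_device *dax_dev)
{
	kill_dax(dax_dev);	/* make subsequent dax_alive() checks fail */
	put_dax(dax_dev);	/* drop the driver's reference */
}

kill_dax() is what invalidates the device for later dax_alive() checks; put_dax() then releases the reference taken at allocation.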
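On the consumer side, the usual pattern with the declarations above is: translate a block-device sector into a dax_device page offset, take the DAX read lock, map the page with dax_direct_access(), perform the memory operation, and call dax_flush() explicitly now that there is no per-driver flush op. The helper below is hypothetical (my_dax_write_and_flush(), and the assumption that len fits within the first mapped page, are mine); the calls and their ordering follow this header.

#include <linux/blkdev.h>
#include <linux/dax.h>
#include <linux/string.h>

/*
 * Hypothetical helper: copy 'len' bytes (len <= PAGE_SIZE) to the DAX
 * device backing 'bdev' at 'sector', then write the cachelines back.
 */
static int my_dax_write_and_flush(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		const void *buf, size_t len)
{
	pgoff_t pgoff;
	void *kaddr;
	pfn_t pfn;
	long avail;
	int id, rc;

	rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
	if (rc)
		return rc;

	/* dax_direct_access() may only be called under dax_read_lock() */
	id = dax_read_lock();
	avail = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	if (avail < 1) {
		dax_read_unlock(id);
		return avail < 0 ? avail : -EIO;
	}

	memcpy(kaddr, buf, len);
	dax_flush(dax_dev, kaddr, len);	/* persist the just-written bytes */

	dax_read_unlock(id);
	return 0;
}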
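Finally, dax_iomap_rw() and dax_iomap_fault() are the entry points a filesystem hangs its read/write and page-fault paths on; the filesystem contributes only its struct iomap_ops. A minimal, hypothetical wiring is sketched below (myfs_iomap_ops and the myfs_* names are placeholders; real filesystems also take inode and mmap locking around these calls).

#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>

/* Hypothetical: the filesystem's block-mapping callbacks. */
extern const struct iomap_ops myfs_iomap_ops;

static ssize_t myfs_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* the caller is assumed to have checked IS_DAX() on the inode */
	return dax_iomap_rw(iocb, to, &myfs_iomap_ops);
}

static int myfs_dax_fault(struct vm_fault *vmf)
{
	/* PE_SIZE_PTE: a single-page fault; PMD/PUD sizes also exist */
	return dax_iomap_fault(vmf, PE_SIZE_PTE, &myfs_iomap_ops);
}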