forked from mirrors/linux
		
	dm: split discards further if target sets max_discard_granularity
The block core (bio_split_discard) will already split discards based on the
'discard_granularity' and 'max_discard_sectors' queue_limits. But the DM thin
target also needs to ensure that it doesn't receive a discard that spans a
'max_discard_sectors' boundary.

Introduce a dm_target 'max_discard_granularity' flag that, if set, will cause
DM core to split discard bios relative to 'max_discard_sectors'. This treats
'discard_granularity' as a "min_discard_granularity" and 'max_discard_sectors'
as a "max_discard_granularity".

Requested-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
parent bb46c56165
commit 06961c487a

3 changed files with 27 additions and 8 deletions
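For illustration, a minimal sketch (not part of this commit) of how a target constructor could opt in to the new flag. The target name and constructor are hypothetical; only the dm_target fields it sets come from the diff below:

#include <linux/device-mapper.h>

/*
 * Hypothetical constructor for a target that wants DM core to split its
 * discards so that none crosses a 'max_discard_sectors' boundary.  With
 * max_discard_granularity set, __process_abnormal_io() (see the dm.c hunks
 * below) passes the queue's max_discard_sectors to __max_io_len(), which
 * caps each cloned discard bio at that boundary.
 */
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	ti->num_discard_bios = 1;		/* target accepts discard bios */
	ti->discards_supported = true;
	ti->max_discard_granularity = true;	/* new flag added by this commit */

	return 0;
}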
				
			
drivers/md/dm.c
@@ -1162,7 +1162,8 @@ static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
 	return ti->len - target_offset;
 }
 
-static sector_t max_io_len(struct dm_target *ti, sector_t sector)
+static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
+			     unsigned int max_granularity)
 {
 	sector_t target_offset = dm_target_offset(ti, sector);
 	sector_t len = max_io_len_target_boundary(ti, target_offset);
@@ -1173,11 +1174,16 @@ static sector_t max_io_len(struct dm_target *ti, sector_t sector)
 	 *   explains why stacked chunk_sectors based splitting via
 	 *   bio_split_to_limits() isn't possible here.
 	 */
-	if (!ti->max_io_len)
+	if (!max_granularity)
 		return len;
 	return min_t(sector_t, len,
 		min(queue_max_sectors(ti->table->md->queue),
-		    blk_chunk_sectors_left(target_offset, ti->max_io_len)));
+		    blk_chunk_sectors_left(target_offset, max_granularity)));
 }
 
+static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
+{
+	return __max_io_len(ti, sector, ti->max_io_len);
+}
+
 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
@@ -1565,12 +1571,13 @@ static void __send_empty_flush(struct clone_info *ci)
 }
 
 static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
-					unsigned int num_bios)
+					unsigned int num_bios,
+					unsigned int max_granularity)
 {
 	unsigned int len, bios;
 
 	len = min_t(sector_t, ci->sector_count,
-		    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
+		    __max_io_len(ti, ci->sector, max_granularity));
 
 	atomic_add(num_bios, &ci->io->io_count);
 	bios = __send_duplicate_bios(ci, ti, num_bios, &len);
@@ -1606,10 +1613,16 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
 					  struct dm_target *ti)
 {
 	unsigned int num_bios = 0;
+	unsigned int max_granularity = 0;
 
 	switch (bio_op(ci->bio)) {
 	case REQ_OP_DISCARD:
 		num_bios = ti->num_discard_bios;
+		if (ti->max_discard_granularity) {
+			struct queue_limits *limits =
+				dm_get_queue_limits(ti->table->md);
+			max_granularity = limits->max_discard_sectors;
+		}
 		break;
 	case REQ_OP_SECURE_ERASE:
 		num_bios = ti->num_secure_erase_bios;
@@ -1630,7 +1643,7 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
 	if (unlikely(!num_bios))
 		return BLK_STS_NOTSUPP;
 
-	__send_changing_extent_only(ci, ti, num_bios);
+	__send_changing_extent_only(ci, ti, num_bios, max_granularity);
 	return BLK_STS_OK;
 }
 
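As a sanity check on the __max_io_len() arithmetic above (example values are assumed, not from the commit): for a power-of-two chunk size, blk_chunk_sectors_left() reduces to chunk - (offset & (chunk - 1)), so each cloned discard is capped to end exactly on the next 'max_discard_sectors' boundary:

/*
 * Assumed values: max_discard_sectors = 8192, target_offset = 12288.
 *
 *   blk_chunk_sectors_left(12288, 8192)
 *     = 8192 - (12288 & (8192 - 1))
 *     = 8192 - 4096
 *     = 4096
 *
 * __max_io_len() therefore returns at most 4096 sectors, so this discard
 * ends on the boundary at sector 16384 and the remainder is issued as a
 * separate clone starting there.
 */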
include/linux/device-mapper.h
@@ -358,6 +358,12 @@ struct dm_target {
 	 */
 	bool discards_supported:1;
 
+	/*
+	 * Set if this target requires that discards be split on both
+	 * 'discard_granularity' and 'max_discard_sectors' boundaries.
+	 */
+	bool max_discard_granularity:1;
+
 	/*
 	 * Set if we need to limit the number of in-flight bios when swapping.
 	 */
include/uapi/linux/dm-ioctl.h
@@ -286,9 +286,9 @@ enum {
 #define DM_DEV_SET_GEOMETRY	_IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR	4
-#define DM_VERSION_MINOR	47
+#define DM_VERSION_MINOR	48
 #define DM_VERSION_PATCHLEVEL	0
-#define DM_VERSION_EXTRA	"-ioctl (2022-07-28)"
+#define DM_VERSION_EXTRA	"-ioctl (2023-03-01)"
 
 /* Status bits */
 #define DM_READONLY_FLAG	(1 << 0) /* In/Out */