forked from mirrors/linux
		
	dm: stop using blk_limits_io_{min,opt}
Remove use of the blk_limits_io_{min,opt} helpers and assign the values
directly to the queue_limits structure.  For io_opt this is a completely
mechanical change; for io_min it also removes the flooring of the limit
to the physical and logical block size in the particular caller.  But as
blk_validate_limits will do the same later when actually applying the
limits, there is still no change in overall behavior.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
			
			
This commit is contained in:
		
							parent
							
								
									0d815e3400
								
							
						
					
					
						commit
						0a94a469a4
					
				
					 11 changed files with 20 additions and 20 deletions
				
			
		|  | @ -3416,8 +3416,8 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) | ||||||
| 	 */ | 	 */ | ||||||
| 	if (io_opt_sectors < cache->sectors_per_block || | 	if (io_opt_sectors < cache->sectors_per_block || | ||||||
| 	    do_div(io_opt_sectors, cache->sectors_per_block)) { | 	    do_div(io_opt_sectors, cache->sectors_per_block)) { | ||||||
| 		blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT); | 		limits->io_min = cache->sectors_per_block << SECTOR_SHIFT; | ||||||
| 		blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT); | 		limits->io_opt = cache->sectors_per_block << SECTOR_SHIFT; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	disable_passdown_if_not_supported(cache); | 	disable_passdown_if_not_supported(cache); | ||||||
|  |  | ||||||
|  | @ -2073,8 +2073,8 @@ static void clone_io_hints(struct dm_target *ti, struct queue_limits *limits) | ||||||
| 	 */ | 	 */ | ||||||
| 	if (io_opt_sectors < clone->region_size || | 	if (io_opt_sectors < clone->region_size || | ||||||
| 	    do_div(io_opt_sectors, clone->region_size)) { | 	    do_div(io_opt_sectors, clone->region_size)) { | ||||||
| 		blk_limits_io_min(limits, clone->region_size << SECTOR_SHIFT); | 		limits->io_min = clone->region_size << SECTOR_SHIFT; | ||||||
| 		blk_limits_io_opt(limits, clone->region_size << SECTOR_SHIFT); | 		limits->io_opt = clone->region_size << SECTOR_SHIFT; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	disable_passdown_if_not_supported(clone); | 	disable_passdown_if_not_supported(clone); | ||||||
|  |  | ||||||
|  | @ -428,7 +428,7 @@ static void ebs_io_hints(struct dm_target *ti, struct queue_limits *limits) | ||||||
| 	limits->logical_block_size = to_bytes(ec->e_bs); | 	limits->logical_block_size = to_bytes(ec->e_bs); | ||||||
| 	limits->physical_block_size = to_bytes(ec->u_bs); | 	limits->physical_block_size = to_bytes(ec->u_bs); | ||||||
| 	limits->alignment_offset = limits->physical_block_size; | 	limits->alignment_offset = limits->physical_block_size; | ||||||
| 	blk_limits_io_min(limits, limits->logical_block_size); | 	limits->io_min = limits->logical_block_size; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static int ebs_iterate_devices(struct dm_target *ti, | static int ebs_iterate_devices(struct dm_target *ti, | ||||||
|  |  | ||||||
|  | @ -1733,8 +1733,8 @@ static void era_io_hints(struct dm_target *ti, struct queue_limits *limits) | ||||||
| 	 */ | 	 */ | ||||||
| 	if (io_opt_sectors < era->sectors_per_block || | 	if (io_opt_sectors < era->sectors_per_block || | ||||||
| 	    do_div(io_opt_sectors, era->sectors_per_block)) { | 	    do_div(io_opt_sectors, era->sectors_per_block)) { | ||||||
| 		blk_limits_io_min(limits, 0); | 		limits->io_min = 0; | ||||||
| 		blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT); | 		limits->io_opt = era->sectors_per_block << SECTOR_SHIFT; | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -3471,7 +3471,7 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim | ||||||
| 	if (ic->sectors_per_block > 1) { | 	if (ic->sectors_per_block > 1) { | ||||||
| 		limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT; | 		limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT; | ||||||
| 		limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT; | 		limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT; | ||||||
| 		blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT); | 		limits->io_min = ic->sectors_per_block << SECTOR_SHIFT; | ||||||
| 		limits->dma_alignment = limits->logical_block_size - 1; | 		limits->dma_alignment = limits->logical_block_size - 1; | ||||||
| 		limits->discard_granularity = ic->sectors_per_block << SECTOR_SHIFT; | 		limits->discard_granularity = ic->sectors_per_block << SECTOR_SHIFT; | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|  | @ -3802,8 +3802,8 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) | ||||||
| 	struct raid_set *rs = ti->private; | 	struct raid_set *rs = ti->private; | ||||||
| 	unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors); | 	unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors); | ||||||
| 
 | 
 | ||||||
| 	blk_limits_io_min(limits, chunk_size_bytes); | 	limits->io_min = chunk_size_bytes; | ||||||
| 	blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs)); | 	limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static void raid_presuspend(struct dm_target *ti) | static void raid_presuspend(struct dm_target *ti) | ||||||
|  |  | ||||||
|  | @ -459,8 +459,8 @@ static void stripe_io_hints(struct dm_target *ti, | ||||||
| 	struct stripe_c *sc = ti->private; | 	struct stripe_c *sc = ti->private; | ||||||
| 	unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT; | 	unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT; | ||||||
| 
 | 
 | ||||||
| 	blk_limits_io_min(limits, chunk_size); | 	limits->io_min = chunk_size; | ||||||
| 	blk_limits_io_opt(limits, chunk_size * sc->stripes); | 	limits->io_opt = chunk_size * sc->stripes; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static struct target_type stripe_target = { | static struct target_type stripe_target = { | ||||||
|  |  | ||||||
|  | @ -4079,10 +4079,10 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits) | ||||||
| 	if (io_opt_sectors < pool->sectors_per_block || | 	if (io_opt_sectors < pool->sectors_per_block || | ||||||
| 	    !is_factor(io_opt_sectors, pool->sectors_per_block)) { | 	    !is_factor(io_opt_sectors, pool->sectors_per_block)) { | ||||||
| 		if (is_factor(pool->sectors_per_block, limits->max_sectors)) | 		if (is_factor(pool->sectors_per_block, limits->max_sectors)) | ||||||
| 			blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT); | 			limits->io_min = limits->max_sectors << SECTOR_SHIFT; | ||||||
| 		else | 		else | ||||||
| 			blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT); | 			limits->io_min = pool->sectors_per_block << SECTOR_SHIFT; | ||||||
| 		blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT); | 		limits->io_opt = pool->sectors_per_block << SECTOR_SHIFT; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	/*
 | 	/*
 | ||||||
|  |  | ||||||
|  | @ -928,9 +928,9 @@ static void vdo_io_hints(struct dm_target *ti, struct queue_limits *limits) | ||||||
| 	limits->physical_block_size = VDO_BLOCK_SIZE; | 	limits->physical_block_size = VDO_BLOCK_SIZE; | ||||||
| 
 | 
 | ||||||
| 	/* The minimum io size for random io */ | 	/* The minimum io size for random io */ | ||||||
| 	blk_limits_io_min(limits, VDO_BLOCK_SIZE); | 	limits->io_min = VDO_BLOCK_SIZE; | ||||||
| 	/* The optimal io size for streamed/sequential io */ | 	/* The optimal io size for streamed/sequential io */ | ||||||
| 	blk_limits_io_opt(limits, VDO_BLOCK_SIZE); | 	limits->io_opt = VDO_BLOCK_SIZE; | ||||||
| 
 | 
 | ||||||
| 	/*
 | 	/*
 | ||||||
| 	 * Sets the maximum discard size that will be passed into VDO. This value comes from a | 	 * Sets the maximum discard size that will be passed into VDO. This value comes from a | ||||||
|  |  | ||||||
|  | @ -919,7 +919,7 @@ static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits) | ||||||
| 	if (limits->physical_block_size < 1 << v->data_dev_block_bits) | 	if (limits->physical_block_size < 1 << v->data_dev_block_bits) | ||||||
| 		limits->physical_block_size = 1 << v->data_dev_block_bits; | 		limits->physical_block_size = 1 << v->data_dev_block_bits; | ||||||
| 
 | 
 | ||||||
| 	blk_limits_io_min(limits, limits->logical_block_size); | 	limits->io_min = limits->logical_block_size; | ||||||
| 
 | 
 | ||||||
| 	/*
 | 	/*
 | ||||||
| 	 * Similar to what dm-crypt does, opt dm-verity out of support for | 	 * Similar to what dm-crypt does, opt dm-verity out of support for | ||||||
|  |  | ||||||
|  | @ -996,8 +996,8 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits) | ||||||
| 	limits->logical_block_size = DMZ_BLOCK_SIZE; | 	limits->logical_block_size = DMZ_BLOCK_SIZE; | ||||||
| 	limits->physical_block_size = DMZ_BLOCK_SIZE; | 	limits->physical_block_size = DMZ_BLOCK_SIZE; | ||||||
| 
 | 
 | ||||||
| 	blk_limits_io_min(limits, DMZ_BLOCK_SIZE); | 	limits->io_min = DMZ_BLOCK_SIZE; | ||||||
| 	blk_limits_io_opt(limits, DMZ_BLOCK_SIZE); | 	limits->io_opt = DMZ_BLOCK_SIZE; | ||||||
| 
 | 
 | ||||||
| 	limits->discard_alignment = 0; | 	limits->discard_alignment = 0; | ||||||
| 	limits->discard_granularity = DMZ_BLOCK_SIZE; | 	limits->discard_granularity = DMZ_BLOCK_SIZE; | ||||||
|  |  | ||||||
		Loading…
	
		Reference in a new issue
	
	 Christoph Hellwig
						Christoph Hellwig