	dm zoned: ignore last smaller runt zone
The SCSI layer allows ZBC drives to have a smaller last runt zone. For such a device, specifying the entire capacity for a dm-zoned target table entry fails because the specified capacity is not aligned on the device zone size indicated in the request queue structure of the device.

Fix this problem by ignoring the last runt zone in the entry length when setting up the dm-zoned target (ctr method) and when iterating table entries of the target (iterate_devices method). This allows dm-zoned users to still easily set up a target using the entire device capacity (as mandated by dm-zoned) or the aligned capacity excluding the last runt zone.

While at it, replace direct references to the device queue chunk_sectors limit with calls to the accessor blk_queue_zone_sectors().

Reported-by: Peter Desnoyers <pjd@ccs.neu.edu>
Cc: stable@vger.kernel.org
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent fbc61291d7
commit 114e025968

1 changed file with 9 additions and 4 deletions
drivers/md/dm-zoned-target.c

@@ -660,6 +660,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
 	struct dmz_target *dmz = ti->private;
 	struct request_queue *q;
 	struct dmz_dev *dev;
+	sector_t aligned_capacity;
 	int ret;
 
 	/* Get the target device */
@@ -685,15 +686,17 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
 		goto err;
 	}
 
+	q = bdev_get_queue(dev->bdev);
 	dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
-	if (ti->begin || (ti->len != dev->capacity)) {
+	aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1);
+	if (ti->begin ||
+	    ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
 		ti->error = "Partial mapping not supported";
 		ret = -EINVAL;
 		goto err;
 	}
 
-	q = bdev_get_queue(dev->bdev);
-	dev->zone_nr_sectors = q->limits.chunk_sectors;
+	dev->zone_nr_sectors = blk_queue_zone_sectors(q);
 	dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);
 
 	dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
@@ -929,8 +932,10 @@ static int dmz_iterate_devices(struct dm_target *ti,
 			       iterate_devices_callout_fn fn, void *data)
 {
 	struct dmz_target *dmz = ti->private;
+	struct dmz_dev *dev = dmz->dev;
+	sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);
 
-	return fn(ti, dmz->ddev, 0, dmz->dev->capacity, data);
+	return fn(ti, dmz->ddev, 0, capacity, data);
 }
 
 static struct target_type dmz_type = {
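The key operation in both hunks is the capacity & ~(zone_sectors - 1) mask, which rounds the device capacity down to a zone boundary and thereby drops the trailing runt zone; the mask is only valid because the zone size is a power of two. Below is a minimal user-space sketch of that rounding, not kernel code; the 256 MiB zone size and the capacity value are made-up illustrative numbers, not taken from the patch.

/*
 * Minimal user-space sketch of the capacity rounding used in this patch.
 * The zone size and capacity are example values; the mask only works
 * because the zone size is a power of two.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Round a capacity (in 512 B sectors) down to a zone boundary. */
static sector_t zone_aligned_capacity(sector_t capacity, sector_t zone_sectors)
{
	return capacity & ~(zone_sectors - 1);
}

int main(void)
{
	sector_t zone_sectors = 524288;		/* 256 MiB zones */
	sector_t capacity = 524388000;		/* 1000 full zones + a 100000-sector runt */
	sector_t aligned = zone_aligned_capacity(capacity, zone_sectors);

	printf("capacity        : %llu sectors\n", (unsigned long long)capacity);
	printf("aligned capacity: %llu sectors\n", (unsigned long long)aligned);
	printf("runt zone       : %llu sectors\n",
	       (unsigned long long)(capacity - aligned));
	return 0;
}

With the patch applied, a dm-zoned table entry for such a device may specify either the full capacity or the aligned capacity; any other length is still rejected as a partial mapping.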