dm mpath: remove unnecessary NVMe branching in favor of scsi_dh checks

This eliminates the "queue_mode" configuration's "nvme" mode.  There
wasn't anything NVMe-specific about that mode; it was only called
"nvme" because that made for a short mode name.  The entire point of
the mode was to optimize the multipath target for underlying devices
that are _not_ SCSI-based.  Devices that aren't SCSI have no need for
the various SCSI device handler (scsi_dh) specific code in DM
multipath.

Rather than narrowly defining this scsi_dh-vs-not branching in terms
of "nvme", invert the logic so that we are simply checking whether a
multipath device is layered on SCSI devices with scsi_dh attached.
This allows any future storage technology to avoid the scsi_dh-specific
code in the multipath target too.
Fixes: 848b8aefd4 ("dm mpath: optimize NVMe bio-based support")
Suggested-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
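[Editor's note] The SCSI detection this patch adds to parse_path() amounts to asking the block layer whether a struct scsi_device sits behind the path's request queue. Below is a minimal, self-contained sketch of that check; the helper name is hypothetical (the patch open-codes the same two calls inline in parse_path(), see the diff that follows):

	#include <linux/blkdev.h>
	#include <scsi/scsi_device.h>

	/*
	 * Hypothetical standalone helper: scsi_device_from_queue() returns a
	 * referenced scsi_device only when the queue belongs to the SCSI
	 * midlayer, so a NULL result means "not SCSI, skip scsi_dh setup".
	 */
	static bool path_is_scsi_backed(struct block_device *bdev)
	{
		struct scsi_device *sdev = scsi_device_from_queue(bdev_get_queue(bdev));

		if (!sdev)
			return false;	/* e.g. NVMe: no scsi_dh handling needed */

		put_device(&sdev->sdev_gendev);	/* drop the reference taken above */
		return true;
	}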
			
			
parent 99243b922c
commit 8d47e65948
1 changed file with 29 additions and 37 deletions
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -22,6 +22,7 @@
 #include <linux/time.h>
 #include <linux/workqueue.h>
 #include <linux/delay.h>
+#include <scsi/scsi_device.h>
 #include <scsi/scsi_dh.h>
 #include <linux/atomic.h>
 #include <linux/blk-mq.h>
@@ -211,26 +212,14 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
 		else
 			m->queue_mode = DM_TYPE_REQUEST_BASED;
 
-	} else if (m->queue_mode == DM_TYPE_BIO_BASED ||
-		   m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
+	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
 		INIT_WORK(&m->process_queued_bios, process_queued_bios);
-
-		if (m->queue_mode == DM_TYPE_BIO_BASED) {
-			/*
-			 * bio-based doesn't support any direct scsi_dh management;
-			 * it just discovers if a scsi_dh is attached.
-			 */
-			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
-		}
-	}
-
-	if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
-		set_bit(MPATHF_QUEUE_IO, &m->flags);
-		atomic_set(&m->pg_init_in_progress, 0);
-		atomic_set(&m->pg_init_count, 0);
-		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
-		init_waitqueue_head(&m->pg_init_wait);
+		/*
+		 * bio-based doesn't support any direct scsi_dh management;
+		 * it just discovers if a scsi_dh is attached.
+		 */
+		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
 	}
 
 	dm_table_set_type(ti->table, m->queue_mode);
 
@@ -337,14 +326,12 @@ static void __switch_pg(struct multipath *m, struct priority_group *pg)
 {
 	m->current_pg = pg;
 
-	if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
-		return;
-
 	/* Must we initialise the PG first, and queue I/O till it's ready? */
 	if (m->hw_handler_name) {
 		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 		set_bit(MPATHF_QUEUE_IO, &m->flags);
 	} else {
+		/* FIXME: not needed if no scsi_dh is attached */
 		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 	}
@@ -385,7 +372,6 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 	unsigned bypassed = 1;
 
 	if (!atomic_read(&m->nr_valid_paths)) {
-		if (m->queue_mode != DM_TYPE_NVME_BIO_BASED)
-			clear_bit(MPATHF_QUEUE_IO, &m->flags);
+		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 		goto failed;
 	}
@@ -599,7 +585,7 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
 	return pgpath;
 }
 
-static struct pgpath *__map_bio_nvme(struct multipath *m, struct bio *bio)
+static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio)
 {
 	struct pgpath *pgpath;
 	unsigned long flags;
@@ -634,8 +620,8 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio,
 {
 	struct pgpath *pgpath;
 
-	if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
-		pgpath = __map_bio_nvme(m, bio);
+	if (!m->hw_handler_name)
+		pgpath = __map_bio_fast(m, bio);
 	else
 		pgpath = __map_bio(m, bio);
 
@@ -675,8 +661,7 @@ static void process_queued_io_list(struct multipath *m)
 {
 	if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
 		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
-	else if (m->queue_mode == DM_TYPE_BIO_BASED ||
-		 m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+	else if (m->queue_mode == DM_TYPE_BIO_BASED)
 		queue_work(kmultipathd, &m->process_queued_bios);
 }
 
@@ -838,6 +823,16 @@ static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, char **
 			 */
 			kfree(m->hw_handler_name);
 			m->hw_handler_name = attached_handler_name;
+
+			/*
+			 * Init fields that are only used when a scsi_dh is attached
+			 */
+			if (!test_and_set_bit(MPATHF_QUEUE_IO, &m->flags)) {
+				atomic_set(&m->pg_init_in_progress, 0);
+				atomic_set(&m->pg_init_count, 0);
+				m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
+				init_waitqueue_head(&m->pg_init_wait);
+			}
 		}
 	}
 
@@ -873,6 +868,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 	int r;
 	struct pgpath *p;
 	struct multipath *m = ti->private;
+	struct scsi_device *sdev;
 
 	/* we need at least a path arg */
 	if (as->argc < 1) {
@@ -891,7 +887,9 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 		goto bad;
 	}
 
-	if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
+	sdev = scsi_device_from_queue(bdev_get_queue(p->path.dev->bdev));
+	if (sdev) {
+		put_device(&sdev->sdev_gendev);
 		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
 		r = setup_scsi_dh(p->path.dev->bdev, m, &ti->error);
 		if (r) {
@@ -1001,8 +999,7 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
 	if (!hw_argc)
 		return 0;
 
-	if (m->queue_mode == DM_TYPE_BIO_BASED ||
-	    m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
+	if (m->queue_mode == DM_TYPE_BIO_BASED) {
 		dm_consume_args(as, hw_argc);
 		DMERR("bio-based multipath doesn't allow hardware handler args");
 		return 0;
@@ -1091,8 +1088,6 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
 
 			if (!strcasecmp(queue_mode_name, "bio"))
 				m->queue_mode = DM_TYPE_BIO_BASED;
-			else if (!strcasecmp(queue_mode_name, "nvme"))
-				m->queue_mode = DM_TYPE_NVME_BIO_BASED;
 			else if (!strcasecmp(queue_mode_name, "rq"))
 				m->queue_mode = DM_TYPE_REQUEST_BASED;
 			else if (!strcasecmp(queue_mode_name, "mq"))
@@ -1193,7 +1188,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	ti->num_discard_bios = 1;
 	ti->num_write_same_bios = 1;
 	ti->num_write_zeroes_bios = 1;
-	if (m->queue_mode == DM_TYPE_BIO_BASED || m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+	if (m->queue_mode == DM_TYPE_BIO_BASED)
 		ti->per_io_data_size = multipath_per_bio_data_size();
 	else
 		ti->per_io_data_size = sizeof(struct dm_mpath_io);
@@ -1730,9 +1725,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
 			case DM_TYPE_BIO_BASED:
 				DMEMIT("queue_mode bio ");
 				break;
-			case DM_TYPE_NVME_BIO_BASED:
-				DMEMIT("queue_mode nvme ");
-				break;
 			case DM_TYPE_MQ_REQUEST_BASED:
 				DMEMIT("queue_mode mq ");
 				break;
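[Editor's note] The runtime consequence is concentrated in the __multipath_map_bio() hunk above; here is a comment-annotated excerpt of the new branch (the rest of the function is untouched by this patch):

	/*
	 * Excerpt of __multipath_map_bio() as it reads after this patch:
	 * the dispatch no longer asks "is queue_mode nvme?" but "was a
	 * scsi_dh hardware handler configured or discovered for this
	 * device?", so any non-SCSI transport takes the fast path.
	 */
	if (!m->hw_handler_name)
		pgpath = __map_bio_fast(m, bio);	/* no scsi_dh: skip pg_init/queue-I/O handling */
	else
		pgpath = __map_bio(m, bio);		/* scsi_dh attached: full path-group handling */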