dm mpath: optimize NVMe bio-based support

None of the pg_init code is used in bio-based NVMe mode, so it is now skipped entirely in that mode, including the initialization of pg_init-related variables. Also, the pg_init-related members of 'struct multipath' have been grouped together.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
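The whole change reduces to one gate on the map's queue mode: pg_init only matters when a SCSI hardware handler may be attached, so DM_TYPE_NVME_BIO_BASED maps never initialize or consult that state. Below is a minimal userspace sketch of the pattern from the alloc_multipath_stage2() hunk further down; it is not kernel code, the struct is pared down, and pg_init_state_ready is a hypothetical stand-in for the waitqueue, flag bits, and work items the driver really sets up.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum dm_queue_mode { DM_TYPE_REQUEST_BASED, DM_TYPE_BIO_BASED, DM_TYPE_NVME_BIO_BASED };

#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)	/* placeholder; the real sentinel lives in dm-mpath.c */

/* Pared-down stand-in for the kernel's 'struct multipath'. */
struct multipath {
	enum dm_queue_mode queue_mode;
	atomic_uint pg_init_in_progress;
	atomic_uint pg_init_count;
	unsigned pg_init_delay_msecs;
	bool pg_init_state_ready;	/* hypothetical: stands in for pg_init_wait etc. */
};

/* Mirrors the gate the commit adds in alloc_multipath_stage2(): pg_init
 * state is only initialized when the map is not NVMe bio-based. */
static void init_pg_init_state(struct multipath *m)
{
	if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
		atomic_init(&m->pg_init_in_progress, 0);
		atomic_init(&m->pg_init_count, 0);
		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
		m->pg_init_state_ready = true;
	}
}

int main(void)
{
	struct multipath scsi = { .queue_mode = DM_TYPE_REQUEST_BASED };
	struct multipath nvme = { .queue_mode = DM_TYPE_NVME_BIO_BASED };

	init_pg_init_state(&scsi);
	init_pg_init_state(&nvme);

	printf("request-based map: pg_init state ready = %d\n", scsi.pg_init_state_ready);
	printf("NVMe bio-based map: pg_init state ready = %d\n", nvme.pg_init_state_ready);
	return 0;
}

The same gate recurs throughout the diff: setup at stage2, an early return in __switch_pg(), and a direct hand-off to the bio workqueue in __multipath_map_bio().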
parent cd02538445
commit 848b8aefd4
1 changed file with 95 additions and 76 deletions
drivers/md/dm-mpath.c

@@ -64,36 +64,30 @@ struct priority_group {
 
 /* Multipath context */
 struct multipath {
-	struct list_head list;
-	struct dm_target *ti;
-
-	const char *hw_handler_name;
-	char *hw_handler_params;
+	unsigned long flags;		/* Multipath state flags */
 
 	spinlock_t lock;
-
-	unsigned nr_priority_groups;
-	struct list_head priority_groups;
-
-	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
+	enum dm_queue_mode queue_mode;
 
 	struct pgpath *current_pgpath;
 	struct priority_group *current_pg;
 	struct priority_group *next_pg;	/* Switch to this PG if set */
 
-	unsigned long flags;		/* Multipath state flags */
+	atomic_t nr_valid_paths;	/* Total number of usable paths */
+	unsigned nr_priority_groups;
+	struct list_head priority_groups;
 
+	const char *hw_handler_name;
+	char *hw_handler_params;
+	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
 	unsigned pg_init_retries;	/* Number of times to retry pg_init */
 	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
-
-	atomic_t nr_valid_paths;	/* Total number of usable paths */
 	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
 	atomic_t pg_init_count;		/* Number of times pg_init called */
 
-	enum dm_queue_mode queue_mode;
-
 	struct mutex work_mutex;
 	struct work_struct trigger_event;
+	struct dm_target *ti;
 
 	struct work_struct process_queued_bios;
 	struct bio_list queued_bios;
@@ -135,10 +129,10 @@ static struct pgpath *alloc_pgpath(void)
 {
 	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
 
-	if (pgpath) {
-		pgpath->is_active = true;
-		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path_work);
-	}
+	if (!pgpath)
+		return NULL;
+
+	pgpath->is_active = true;
 
 	return pgpath;
 }
@@ -193,13 +187,8 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 	if (m) {
 		INIT_LIST_HEAD(&m->priority_groups);
 		spin_lock_init(&m->lock);
-		set_bit(MPATHF_QUEUE_IO, &m->flags);
 		atomic_set(&m->nr_valid_paths, 0);
-		atomic_set(&m->pg_init_in_progress, 0);
-		atomic_set(&m->pg_init_count, 0);
-		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
 		INIT_WORK(&m->trigger_event, trigger_event);
-		init_waitqueue_head(&m->pg_init_wait);
 		mutex_init(&m->work_mutex);
 
 		m->queue_mode = DM_TYPE_NONE;
@@ -235,6 +224,14 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
 		}
 	}
 
+	if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
+		set_bit(MPATHF_QUEUE_IO, &m->flags);
+		atomic_set(&m->pg_init_in_progress, 0);
+		atomic_set(&m->pg_init_count, 0);
+		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
+		init_waitqueue_head(&m->pg_init_wait);
+	}
+
 	dm_table_set_type(ti->table, m->queue_mode);
 
 	return 0;
@@ -339,6 +336,9 @@ static void __switch_pg(struct multipath *m, struct priority_group *pg)
 {
 	m->current_pg = pg;
 
+	if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+		return;
+
 	/* Must we initialise the PG first, and queue I/O till it's ready? */
 	if (m->hw_handler_name) {
 		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
@@ -384,6 +384,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 	unsigned bypassed = 1;
 
 	if (!atomic_read(&m->nr_valid_paths)) {
-		clear_bit(MPATHF_QUEUE_IO, &m->flags);
+		if (m->queue_mode != DM_TYPE_NVME_BIO_BASED)
+			clear_bit(MPATHF_QUEUE_IO, &m->flags);
 		goto failed;
 	}
@@ -528,8 +529,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 	clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
 	if (IS_ERR(clone)) {
 		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
-		bool queue_dying = blk_queue_dying(q);
-		if (queue_dying) {
+		if (blk_queue_dying(q)) {
 			atomic_inc(&m->pg_init_in_progress);
 			activate_or_offline_path(pgpath);
 		}
@@ -563,21 +563,28 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
 
 	/* Do we need to select a new pgpath? */
 	pgpath = READ_ONCE(m->current_pgpath);
+	/* MPATHF_QUEUE_IO will never be set for NVMe */
 	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
 	if (!pgpath || !queue_io)
 		pgpath = choose_pgpath(m, mpio->nr_bytes);
 
-	if ((pgpath && queue_io) ||
-	    (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
+	if ((!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
+	    (pgpath && queue_io)) {
 		/* Queue for the daemon to resubmit */
 		spin_lock_irqsave(&m->lock, flags);
 		bio_list_add(&m->queued_bios, bio);
 		spin_unlock_irqrestore(&m->lock, flags);
-		/* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
-		if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
-			pg_init_all_paths(m);
-		else if (!queue_io)
-			queue_work(kmultipathd, &m->process_queued_bios);
+
+		if (m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
+			queue_work(kmultipathd, &m->process_queued_bios);
+		} else {
+			/* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
+			if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
+				pg_init_all_paths(m);
+			else if (!queue_io)
+				queue_work(kmultipathd, &m->process_queued_bios);
+		}
+
 		return DM_MAPIO_SUBMITTED;
 	}
 
@@ -750,34 +757,11 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 	return 0;
 }
 
-static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
-			       struct dm_target *ti)
+static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, char **error)
 {
-	int r;
-	struct pgpath *p;
-	struct multipath *m = ti->private;
-	struct request_queue *q = NULL;
+	struct request_queue *q = bdev_get_queue(bdev);
 	const char *attached_handler_name;
-
-	/* we need at least a path arg */
-	if (as->argc < 1) {
-		ti->error = "no device given";
-		return ERR_PTR(-EINVAL);
-	}
-
-	p = alloc_pgpath();
-	if (!p)
-		return ERR_PTR(-ENOMEM);
-
-	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
-			  &p->path.dev);
-	if (r) {
-		ti->error = "error getting device";
-		goto bad;
-	}
-
-	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
-		q = bdev_get_queue(p->path.dev->bdev);
+	int r;
 
 	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
 retain:
@@ -809,23 +793,56 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 			char b[BDEVNAME_SIZE];
 
 			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
-				bdevname(p->path.dev->bdev, b));
+			       bdevname(bdev, b));
 			goto retain;
 		}
 		if (r < 0) {
-			ti->error = "error attaching hardware handler";
-			dm_put_device(ti, p->path.dev);
-			goto bad;
+			*error = "error attaching hardware handler";
+			return r;
 		}
 
 		if (m->hw_handler_params) {
 			r = scsi_dh_set_params(q, m->hw_handler_params);
 			if (r < 0) {
-				ti->error = "unable to set hardware "
-							"handler parameters";
-				dm_put_device(ti, p->path.dev);
-				goto bad;
+				*error = "unable to set hardware handler parameters";
+				return r;
 			}
 		}
 	}
 
+	return 0;
+}
+
+static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
+				 struct dm_target *ti)
+{
+	int r;
+	struct pgpath *p;
+	struct multipath *m = ti->private;
+
+	/* we need at least a path arg */
+	if (as->argc < 1) {
+		ti->error = "no device given";
+		return ERR_PTR(-EINVAL);
+	}
+
+	p = alloc_pgpath();
+	if (!p)
+		return ERR_PTR(-ENOMEM);
+
+	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
+			  &p->path.dev);
+	if (r) {
+		ti->error = "error getting device";
+		goto bad;
+	}
+
+	if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
+		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
+		r = setup_scsi_dh(p->path.dev->bdev, m, &ti->error);
+		if (r) {
+			dm_put_device(ti, p->path.dev);
+			goto bad;
+		}
+	}
+
@@ -836,7 +853,6 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 	}
 
 	return p;
-
  bad:
 	free_pgpath(p);
 	return ERR_PTR(r);
@@ -1152,18 +1168,21 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
 
 static void flush_multipath_work(struct multipath *m)
 {
-	set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
-	smp_mb__after_atomic();
+	if (m->hw_handler_name) {
+		set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+		smp_mb__after_atomic();
 
-	flush_workqueue(kmpath_handlerd);
-	multipath_wait_for_pg_init_completion(m);
-	flush_workqueue(kmultipathd);
-	flush_work(&m->trigger_event);
+		flush_workqueue(kmpath_handlerd);
+		multipath_wait_for_pg_init_completion(m);
 
-	clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
-	smp_mb__after_atomic();
+		clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+		smp_mb__after_atomic();
+	}
+
+	flush_workqueue(kmultipathd);
+	flush_work(&m->trigger_event);
 }
 
 static void multipath_dtr(struct dm_target *ti)
 {
 	struct multipath *m = ti->private;
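One detail of the parse_path()/setup_scsi_dh() split above deserves a note: the extracted helper returns a plain int and hands failure text back through a char ** out-parameter, while the caller keeps ownership of the device reference (dm_put_device) and the ERR_PTR conversion. A tiny standalone sketch of that contract, with hypothetical names and ordinary userspace C rather than kernel code:

#include <stdio.h>

/* Sketch of the setup_scsi_dh() error contract: return 0 or a negative
 * code and, on failure, point *error at a static message. Cleanup of
 * resources acquired by the caller stays with the caller. */
static int setup_handler(int simulate_failure, const char **error)
{
	if (simulate_failure) {
		*error = "error attaching hardware handler";
		return -1;
	}
	return 0;
}

int main(void)
{
	const char *error = NULL;

	if (setup_handler(1, &error) < 0)
		fprintf(stderr, "setup failed: %s\n", error);	/* caller cleans up here */
	return 0;
}

Keeping resource release in the caller is what lets the NVMe bio-based path in parse_path() skip the helper entirely without duplicating any error-unwind logic.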