	dm mpath: micro-optimize the hot path relative to MPATHF_QUEUE_IF_NO_PATH
Instead of checking MPATHF_QUEUE_IF_NO_PATH, MPATHF_SAVED_QUEUE_IF_NO_PATH and the no_flush flag to decide whether or not to push back a request (or bio) if there are no paths available, only clear MPATHF_QUEUE_IF_NO_PATH in queue_if_no_path() if no_flush has not been set. The result is that only a single bit has to be tested in the hot path to decide whether or not a request must be pushed back, and also that m->lock does not have to be taken in the hot path.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
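The effect of the change can be illustrated with a small, self-contained userspace model of the before/after push-back check. This is a sketch only: struct multipath_model, must_push_back_old(), must_push_back_new() and model_queue_if_no_path() are hypothetical names, plain bools stand in for the MPATHF_* flag bits, and a pthread mutex stands in for m->lock; the authoritative change is the kernel diff below.

#include <stdbool.h>
#include <pthread.h>

/* Illustrative model only -- simplified stand-ins, not the kernel's struct multipath. */
struct multipath_model {
	pthread_mutex_t lock;		/* stands in for m->lock */
	bool queue_if_no_path;		/* stands in for MPATHF_QUEUE_IF_NO_PATH */
	bool saved_queue_if_no_path;	/* stands in for MPATHF_SAVED_QUEUE_IF_NO_PATH */
	bool noflush_suspending;	/* stands in for dm_noflush_suspending(m->ti) */
};

/* Before the patch: the hot path took the lock and tested three conditions. */
static bool must_push_back_old(struct multipath_model *m)
{
	bool r;

	pthread_mutex_lock(&m->lock);
	r = m->queue_if_no_path ||
	    (m->queue_if_no_path != m->saved_queue_if_no_path &&
	     m->noflush_suspending);
	pthread_mutex_unlock(&m->lock);
	return r;
}

/* After the patch: the hot path is a single lock-free flag test. */
static bool must_push_back_new(struct multipath_model *m)
{
	return m->queue_if_no_path;
}

/*
 * After the patch the slow path (queue_if_no_path()) keeps the flag set while
 * a no-flush suspend is in progress, so the hot path can rely on it alone.
 */
static void model_queue_if_no_path(struct multipath_model *m,
				   bool queue_if_no_path, bool save_old_value)
{
	pthread_mutex_lock(&m->lock);
	if (save_old_value)
		m->saved_queue_if_no_path = m->queue_if_no_path;
	m->queue_if_no_path = queue_if_no_path || m->noflush_suspending;
	pthread_mutex_unlock(&m->lock);
}

int main(void)
{
	struct multipath_model m;

	pthread_mutex_init(&m.lock, NULL);
	m.queue_if_no_path = false;
	m.saved_queue_if_no_path = false;
	m.noflush_suspending = true;		/* a no-flush suspend is in progress */

	model_queue_if_no_path(&m, false, true);	/* presuspend-style call */

	/* Both checks agree the I/O must be pushed back; the new one needs no lock. */
	return (must_push_back_new(&m) && must_push_back_old(&m)) ? 0 : 1;
}

The design point is that the slow path now folds the dm_noflush_suspending() case into MPATHF_QUEUE_IF_NO_PATH whenever the flag is updated, so the per-I/O path no longer needs m->lock or the saved flag.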
commit ca5beb76c3
parent 7e0d574f26
1 changed file with 11 additions and 59 deletions
@@ -441,47 +441,6 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 	return NULL;
 }
 
-/*
- * Check whether bios must be queued in the device-mapper core rather
- * than here in the target.
- *
- * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
- * same value then we are not between multipath_presuspend()
- * and multipath_resume() calls and we have no need to check
- * for the DMF_NOFLUSH_SUSPENDING flag.
- */
-static bool __must_push_back(struct multipath *m)
-{
-	return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
-		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
-		dm_noflush_suspending(m->ti));
-}
-
-static bool must_push_back_rq(struct multipath *m)
-{
-	bool r;
-	unsigned long flags;
-
-	spin_lock_irqsave(&m->lock, flags);
-	r = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
-	     __must_push_back(m));
-	spin_unlock_irqrestore(&m->lock, flags);
-
-	return r;
-}
-
-static bool must_push_back_bio(struct multipath *m)
-{
-	bool r;
-	unsigned long flags;
-
-	spin_lock_irqsave(&m->lock, flags);
-	r = __must_push_back(m);
-	spin_unlock_irqrestore(&m->lock, flags);
-
-	return r;
-}
-
 /*
  * Map cloned requests (request-based multipath)
  */
@@ -503,7 +462,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 		pgpath = choose_pgpath(m, nr_bytes);
 
 	if (!pgpath) {
-		if (must_push_back_rq(m))
+		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 			return DM_MAPIO_DELAY_REQUEUE;
 		return -EIO;	/* Failed */
 	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
@@ -580,9 +539,9 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
 	}
 
 	if (!pgpath) {
-		if (!must_push_back_bio(m))
-			return -EIO;
-		return DM_MAPIO_REQUEUE;
+		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+			return DM_MAPIO_REQUEUE;
+		return -EIO;
 	}
 
 	mpio->pgpath = pgpath;
@@ -674,7 +633,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
 		else
 			clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
 	}
-	if (queue_if_no_path)
+	if (queue_if_no_path || dm_noflush_suspending(m->ti))
 		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
 	else
 		clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
@@ -1519,12 +1478,9 @@ static int do_end_io(struct multipath *m, struct request *clone,
 	if (mpio->pgpath)
 		fail_path(mpio->pgpath);
 
-	if (!atomic_read(&m->nr_valid_paths)) {
-		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
-			if (!must_push_back_rq(m))
-				r = -EIO;
-		}
-	}
+	if (atomic_read(&m->nr_valid_paths) == 0 &&
+	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+		r = -EIO;
 
 	return r;
 }
@@ -1565,13 +1521,9 @@ static int do_end_io_bio(struct multipath *m, struct bio *clone,
 	if (mpio->pgpath)
 		fail_path(mpio->pgpath);
 
-	if (!atomic_read(&m->nr_valid_paths)) {
-		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
-			if (!must_push_back_bio(m))
-				return -EIO;
-			return DM_ENDIO_REQUEUE;
-		}
-	}
+	if (atomic_read(&m->nr_valid_paths) == 0 &&
+	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+		return -EIO;
 
 	/* Queue for the daemon to resubmit */
 	dm_bio_restore(get_bio_details_from_bio(clone), clone);