scsi: core: avoid host-wide host_busy counter for scsi_mq

It is no longer necessary to check the host queue depth in scsi_queue_rq(),
since blk-mq already enforces it before calling scsi_queue_rq() by requiring
a driver tag. Many LUNs may be attached to the same host, and per-host IOPS
may reach millions, so expensive atomic operations on a host-wide counter
should be avoided in the IO path.

This patch implements scsi_host_busy() via blk_mq_tagset_busy_iter() to read
the count of busy IOs for scsi_mq. In an IO test on scsi_debug (32 LUNs, 32
submit queues, 1024 can_queue, libaio/dio) on a dual-socket system, IOPS
increased by 15%.

[mkp: clarified commit message]

Cc: Omar Sandoval <osandov@fb.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: James Bottomley <james.bottomley@hansenpartnership.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Don Brace <don.brace@microsemi.com>
Cc: Kashyap Desai <kashyap.desai@broadcom.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Laurence Oberman <loberman@redhat.com>
Cc: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
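As a standalone illustration of the approach described above, here is a minimal sketch of counting in-flight requests by walking the host tag set, the same technique the diff below adds to drivers/scsi/hosts.c. The helper names are illustrative (the patch's own names appear in the diff), and it assumes the 4.18-era blk_mq_tagset_busy_iter()/busy_tag_iter_fn signatures used by this patch:

#include <linux/blk-mq.h>
#include <scsi/scsi_host.h>

/* Accumulator passed through blk_mq_tagset_busy_iter() (illustrative name). */
struct host_in_flight {
	int cnt;
};

/* busy_tag_iter_fn: called once for every allocated tag in the tag set. */
static void count_in_flight(struct request *rq, void *data, bool reserved)
{
	struct host_in_flight *in_flight = data;

	/* Only requests already dispatched to the driver count as busy. */
	if (blk_mq_request_started(rq))
		in_flight->cnt++;
}

/* Read the busy count on demand instead of maintaining a shared atomic. */
static int host_busy_count(struct Scsi_Host *shost)
{
	struct host_in_flight in_flight = { .cnt = 0 };

	blk_mq_tagset_busy_iter(&shost->tag_set, count_in_flight, &in_flight);
	return in_flight.cnt;
}

The trade-off is that reads become a walk over the tag set rather than a single atomic_read(), which is acceptable because the remaining consumers (for example the host_blocked check in scsi_host_queue_ready() below) sit outside the per-IO hot path.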
This commit is contained in:

parent c84b023a4c
commit 328728630d

2 changed files with 40 additions and 7 deletions
drivers/scsi/hosts.c

@@ -563,13 +563,35 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
 }
 EXPORT_SYMBOL(scsi_host_get);
 
+struct scsi_host_mq_in_flight {
+	int cnt;
+};
+
+static void scsi_host_check_in_flight(struct request *rq, void *data,
+		bool reserved)
+{
+	struct scsi_host_mq_in_flight *in_flight = data;
+
+	if (blk_mq_request_started(rq))
+		in_flight->cnt++;
+}
+
 /**
  * scsi_host_busy - Return the host busy counter
  * @shost:	Pointer to Scsi_Host to inc.
  **/
 int scsi_host_busy(struct Scsi_Host *shost)
 {
-	return atomic_read(&shost->host_busy);
+	struct scsi_host_mq_in_flight in_flight = {
+		.cnt = 0,
+	};
+
+	if (!shost->use_blk_mq)
+		return atomic_read(&shost->host_busy);
+
+	blk_mq_tagset_busy_iter(&shost->tag_set, scsi_host_check_in_flight,
+			&in_flight);
+	return in_flight.cnt;
 }
 EXPORT_SYMBOL(scsi_host_busy);
 
drivers/scsi/scsi_lib.c

@@ -345,7 +345,8 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost)
 	unsigned long flags;
 
 	rcu_read_lock();
-	atomic_dec(&shost->host_busy);
+	if (!shost->use_blk_mq)
+		atomic_dec(&shost->host_busy);
 	if (unlikely(scsi_host_in_recovery(shost))) {
 		spin_lock_irqsave(shost->host_lock, flags);
 		if (shost->host_failed || shost->host_eh_scheduled)
@@ -444,7 +445,12 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget)
 
 static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
 {
-	if (shost->can_queue > 0 &&
+	/*
+	 * blk-mq can handle host queue busy efficiently via host-wide driver
+	 * tag allocation
+	 */
+
+	if (!shost->use_blk_mq && shost->can_queue > 0 &&
 	    atomic_read(&shost->host_busy) >= shost->can_queue)
 		return true;
 	if (atomic_read(&shost->host_blocked) > 0)
@@ -1600,9 +1606,12 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
 	if (scsi_host_in_recovery(shost))
 		return 0;
 
-	busy = atomic_inc_return(&shost->host_busy) - 1;
+	if (!shost->use_blk_mq)
+		busy = atomic_inc_return(&shost->host_busy) - 1;
+	else
+		busy = 0;
 	if (atomic_read(&shost->host_blocked) > 0) {
-		if (busy)
+		if (busy || scsi_host_busy(shost))
 			goto starved;
 
 		/*
@@ -1616,7 +1625,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
 				     "unblocking host at zero depth\n"));
 	}
 
-	if (shost->can_queue > 0 && busy >= shost->can_queue)
+	if (!shost->use_blk_mq && shost->can_queue > 0 && busy >= shost->can_queue)
 		goto starved;
 	if (shost->host_self_blocked)
 		goto starved;
@@ -1702,7 +1711,9 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
 	 * with the locks as normal issue path does.
 	 */
 	atomic_inc(&sdev->device_busy);
-	atomic_inc(&shost->host_busy);
+
+	if (!shost->use_blk_mq)
+		atomic_inc(&shost->host_busy);
 	if (starget->can_queue > 0)
 		atomic_inc(&starget->target_busy);
 
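For context, a sketch of how a slow-path reader might consume the counter after this change. It is modeled on the SCSI host "host_busy" sysfs attribute that the parent commit c84b023a4c converts to scsi_host_busy(); the attribute wiring shown here is an assumption for illustration, not part of this diff:

#include <linux/device.h>
#include <scsi/scsi_host.h>

/*
 * Illustrative sysfs-style reader: the value is computed on demand by
 * scsi_host_busy(), so the blk-mq IO path pays no per-request atomic cost.
 */
static ssize_t host_busy_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);

	return snprintf(buf, 20, "%d\n", scsi_host_busy(shost));
}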