mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	blk-flush: reuse rq queuelist in flush state machine
Since we no longer need to maintain a list of in-flight flush_data requests, we can reuse rq->queuelist for the flush pending list. Note that in mq_flush_data_end_io() we need to re-initialize rq->queuelist before reusing it in the state machine at completion time, since rq->rq_next shares the same storage and the driver may therefore have corrupted rq->queuelist. This patch decreases the size of struct request by 16 bytes. Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Ming Lei <ming.lei@redhat.com> Link: https://lore.kernel.org/r/20230717040058.3993930-5-chengming.zhou@linux.dev Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
		
							parent
							
								
									b175c86739
								
							
						
					
					
						commit
						81ada09cc2
					
				
					 2 changed files with 10 additions and 8 deletions
				
			
		| 
						 | 
					@ -183,14 +183,13 @@ static void blk_flush_complete_seq(struct request *rq,
 | 
				
			||||||
		/* queue for flush */
 | 
							/* queue for flush */
 | 
				
			||||||
		if (list_empty(pending))
 | 
							if (list_empty(pending))
 | 
				
			||||||
			fq->flush_pending_since = jiffies;
 | 
								fq->flush_pending_since = jiffies;
 | 
				
			||||||
		list_move_tail(&rq->flush.list, pending);
 | 
							list_move_tail(&rq->queuelist, pending);
 | 
				
			||||||
		break;
 | 
							break;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	case REQ_FSEQ_DATA:
 | 
						case REQ_FSEQ_DATA:
 | 
				
			||||||
		list_del_init(&rq->flush.list);
 | 
					 | 
				
			||||||
		fq->flush_data_in_flight++;
 | 
							fq->flush_data_in_flight++;
 | 
				
			||||||
		spin_lock(&q->requeue_lock);
 | 
							spin_lock(&q->requeue_lock);
 | 
				
			||||||
		list_add(&rq->queuelist, &q->requeue_list);
 | 
							list_move(&rq->queuelist, &q->requeue_list);
 | 
				
			||||||
		spin_unlock(&q->requeue_lock);
 | 
							spin_unlock(&q->requeue_lock);
 | 
				
			||||||
		blk_mq_kick_requeue_list(q);
 | 
							blk_mq_kick_requeue_list(q);
 | 
				
			||||||
		break;
 | 
							break;
 | 
				
			||||||
| 
						 | 
					@ -202,7 +201,7 @@ static void blk_flush_complete_seq(struct request *rq,
 | 
				
			||||||
		 * flush data request completion path.  Restore @rq for
 | 
							 * flush data request completion path.  Restore @rq for
 | 
				
			||||||
		 * normal completion and end it.
 | 
							 * normal completion and end it.
 | 
				
			||||||
		 */
 | 
							 */
 | 
				
			||||||
		list_del_init(&rq->flush.list);
 | 
							list_del_init(&rq->queuelist);
 | 
				
			||||||
		blk_flush_restore_request(rq);
 | 
							blk_flush_restore_request(rq);
 | 
				
			||||||
		blk_mq_end_request(rq, error);
 | 
							blk_mq_end_request(rq, error);
 | 
				
			||||||
		break;
 | 
							break;
 | 
				
			||||||
| 
						 | 
					@ -258,7 +257,7 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
 | 
				
			||||||
	fq->flush_running_idx ^= 1;
 | 
						fq->flush_running_idx ^= 1;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/* and push the waiting requests to the next stage */
 | 
						/* and push the waiting requests to the next stage */
 | 
				
			||||||
	list_for_each_entry_safe(rq, n, running, flush.list) {
 | 
						list_for_each_entry_safe(rq, n, running, queuelist) {
 | 
				
			||||||
		unsigned int seq = blk_flush_cur_seq(rq);
 | 
							unsigned int seq = blk_flush_cur_seq(rq);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
 | 
							BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
 | 
				
			||||||
| 
						 | 
					@ -292,7 +291,7 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
 | 
						struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
 | 
				
			||||||
	struct request *first_rq =
 | 
						struct request *first_rq =
 | 
				
			||||||
		list_first_entry(pending, struct request, flush.list);
 | 
							list_first_entry(pending, struct request, queuelist);
 | 
				
			||||||
	struct request *flush_rq = fq->flush_rq;
 | 
						struct request *flush_rq = fq->flush_rq;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/* C1 described at the top of this file */
 | 
						/* C1 described at the top of this file */
 | 
				
			||||||
| 
						 | 
					@ -376,6 +375,11 @@ static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
 | 
				
			||||||
	 */
 | 
						 */
 | 
				
			||||||
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
 | 
						spin_lock_irqsave(&fq->mq_flush_lock, flags);
 | 
				
			||||||
	fq->flush_data_in_flight--;
 | 
						fq->flush_data_in_flight--;
 | 
				
			||||||
 | 
						/*
 | 
				
			||||||
 | 
						 * May have been corrupted by rq->rq_next reuse, we need to
 | 
				
			||||||
 | 
						 * re-initialize rq->queuelist before reusing it here.
 | 
				
			||||||
 | 
						 */
 | 
				
			||||||
 | 
						INIT_LIST_HEAD(&rq->queuelist);
 | 
				
			||||||
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
 | 
						blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
 | 
				
			||||||
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 | 
						spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -386,7 +390,6 @@ static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
 | 
				
			||||||
static void blk_rq_init_flush(struct request *rq)
 | 
					static void blk_rq_init_flush(struct request *rq)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	rq->flush.seq = 0;
 | 
						rq->flush.seq = 0;
 | 
				
			||||||
	INIT_LIST_HEAD(&rq->flush.list);
 | 
					 | 
				
			||||||
	rq->rq_flags |= RQF_FLUSH_SEQ;
 | 
						rq->rq_flags |= RQF_FLUSH_SEQ;
 | 
				
			||||||
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
 | 
						rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
 | 
				
			||||||
	rq->end_io = mq_flush_data_end_io;
 | 
						rq->end_io = mq_flush_data_end_io;
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -178,7 +178,6 @@ struct request {
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	struct {
 | 
						struct {
 | 
				
			||||||
		unsigned int		seq;
 | 
							unsigned int		seq;
 | 
				
			||||||
		struct list_head	list;
 | 
					 | 
				
			||||||
		rq_end_io_fn		*saved_end_io;
 | 
							rq_end_io_fn		*saved_end_io;
 | 
				
			||||||
	} flush;
 | 
						} flush;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue