	block: move io_context creation into where it's needed
The only user of the io_context for IO is BFQ, yet we put the checking
and logic of it into the normal IO path.

Put the creation into blk_mq_sched_assign_ioc(), and have BFQ use that
helper.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 5a9d041ba2
parent 48b5c1fbcd
4 changed files with 7 additions and 12 deletions
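For orientation, here is a small self-contained C model of what the change amounts to. It is not kernel code: the names merely mirror the identifiers in the patch below, and the bodies are simplified stand-ins. It shows the lazy per-task io_context allocation moving out of the common submission path and into the one consumer that needs it, the scheduler helper that BFQ calls from its prepare_request hook.

/*
 * Standalone model of the change (NOT kernel code): the per-task
 * io_context that used to be allocated unconditionally in the bio
 * submission path is now allocated lazily, only by the scheduler
 * helper that actually needs it.
 */
#include <stdio.h>
#include <stdlib.h>

struct io_context {
	int nr_users;		/* stands in for the kernel's refcount */
};

/* Stands in for current->io_context. */
static struct io_context *task_ioc;

/* Analogue of create_task_io_context(): may fail, callers must cope. */
static void create_task_io_context(void)
{
	if (!task_ioc)
		task_ioc = calloc(1, sizeof(*task_ioc));
}

/* After the patch: the scheduler helper creates the context on demand. */
static void blk_mq_sched_assign_ioc(void)
{
	if (!task_ioc)			/* previously done in submit_bio_checks() */
		create_task_io_context();
	if (!task_ioc)			/* allocation failed: live without it */
		return;
	task_ioc->nr_users++;		/* loosely: get_io_context(icq->ioc) */
}

/* Analogue of bfq_prepare_request(): the only user calls the helper. */
static void bfq_prepare_request(void)
{
	blk_mq_sched_assign_ioc();
}

/* Analogue of submit_bio_checks(): no io_context allocation any more. */
static void submit_bio_checks(void)
{
	/* throttling and the other checks would go here */
}

int main(void)
{
	submit_bio_checks();		/* common path stays allocation-free */
	bfq_prepare_request();		/* only BFQ pays for the io_context */
	printf("ioc allocated: %s, users: %d\n",
	       task_ioc ? "yes" : "no",
	       task_ioc ? task_ioc->nr_users : 0);
	free(task_ioc);
	return 0;
}

As in the real helper, the allocation may fail (it uses GFP_ATOMIC in the kernel), and callers tolerate a missing io_context: the "May not have an IO context" comment in the blk-mq-sched.c hunk below covers that case, just as the removed blk-core.c comment described.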
block/bfq-iosched.c
@@ -6573,6 +6573,8 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
  */
 static void bfq_prepare_request(struct request *rq)
 {
+	blk_mq_sched_assign_ioc(rq);
+
 	/*
 	 * Regardless of whether we have an icq attached, we have to
 	 * clear the scheduler pointers, as they might point to
block/blk-core.c
@@ -750,15 +750,6 @@ noinline_for_stack bool submit_bio_checks(struct bio *bio)
 		break;
 	}
 
-	/*
-	 * Various block parts want %current->io_context, so allocate it up
-	 * front rather than dealing with lots of pain to allocate it only
-	 * where needed. This may fail and the block layer knows how to live
-	 * with it.
-	 */
-	if (unlikely(!current->io_context))
-		create_task_io_context(current, GFP_ATOMIC, q->node);
-
 	if (blk_throtl_bio(bio))
 		return false;
 
block/blk-mq-sched.c
@@ -24,6 +24,10 @@ void blk_mq_sched_assign_ioc(struct request *rq)
 	struct io_context *ioc;
 	struct io_cq *icq;
 
+	/* create task io_context, if we don't have one already */
+	if (unlikely(!current->io_context))
+		create_task_io_context(current, GFP_ATOMIC, q->node);
+
 	/*
 	 * May not have an IO context if it's a passthrough request
 	 */
@@ -43,6 +47,7 @@ void blk_mq_sched_assign_ioc(struct request *rq)
 	get_io_context(icq->ioc);
 	rq->elv.icq = icq;
 }
+EXPORT_SYMBOL_GPL(blk_mq_sched_assign_ioc);
 
 /*
  * Mark a hardware queue as needing a restart. For shared queues, maintain
block/blk-mq.c
@@ -406,9 +406,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 
 		if (!op_is_flush(data->cmd_flags) &&
 		    e->type->ops.prepare_request) {
-			if (e->type->icq_cache)
-				blk_mq_sched_assign_ioc(rq);
-
 			e->type->ops.prepare_request(rq);
 			rq->rq_flags |= RQF_ELVPRIV;
 		}