mirror of https://github.com/torvalds/linux.git
	blk-mq-sched: refactor scheduler initialization
Preparation cleanup for the next couple of fixes, push blk_mq_sched_setup()
and e->ops.mq.init_sched() into a helper.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 81380ca107
commit 6917ff0b5b

3 changed files with 59 additions and 61 deletions
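In caller terms, the refactor collapses the old two-step setup into a single entry point. The sketch below condenses that caller-facing change; it is an illustration drawn from the hunks that follow, not code taken verbatim from the patch:

/* Old callers (elevator_init/elevator_switch) drove setup in two steps
 * and had to tear the scheduler tags back down themselves on failure: */
err = blk_mq_sched_setup(q);
if (!err)
	err = e->ops.mq.init_sched(q, e);
if (err)
	blk_mq_sched_teardown(q);

/* New callers make one call; blk_mq_init_sched() allocates the per-hctx
 * sched_tags, invokes e->ops.mq.init_sched(), and calls
 * blk_mq_sched_teardown() itself if either step fails: */
err = blk_mq_init_sched(q, e);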
block/blk-mq-sched.c

@@ -432,53 +432,25 @@ static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
 	}
 }
 
-int blk_mq_sched_setup(struct request_queue *q)
+static int blk_mq_sched_alloc_tags(struct request_queue *q,
+				   struct blk_mq_hw_ctx *hctx,
+				   unsigned int hctx_idx)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
-	struct blk_mq_hw_ctx *hctx;
-	int ret, i;
+	int ret;
 
-	/*
-	 * Default to 256, since we don't split into sync/async like the
-	 * old code did. Additionally, this is a per-hw queue depth.
-	 */
-	q->nr_requests = 2 * BLKDEV_MAX_RQ;
-
-	/*
-	 * We're switching to using an IO scheduler, so setup the hctx
-	 * scheduler tags and switch the request map from the regular
-	 * tags to scheduler tags. First allocate what we need, so we
-	 * can safely fail and fallback, if needed.
-	 */
-	ret = 0;
-	queue_for_each_hw_ctx(q, hctx, i) {
-		hctx->sched_tags = blk_mq_alloc_rq_map(set, i,
-				q->nr_requests, set->reserved_tags);
-		if (!hctx->sched_tags) {
-			ret = -ENOMEM;
-			break;
-		}
-		ret = blk_mq_alloc_rqs(set, hctx->sched_tags, i, q->nr_requests);
-		if (ret)
-			break;
-	}
-
-	/*
-	 * If we failed, free what we did allocate
-	 */
-	if (ret) {
-		queue_for_each_hw_ctx(q, hctx, i) {
-			if (!hctx->sched_tags)
-				continue;
-			blk_mq_sched_free_tags(set, hctx, i);
-		}
-
-		return ret;
-	}
+	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
+					       set->reserved_tags);
+	if (!hctx->sched_tags)
+		return -ENOMEM;
+
+	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
+	if (ret)
+		blk_mq_sched_free_tags(set, hctx, hctx_idx);
 
-	return 0;
+	return ret;
 }
 
 void blk_mq_sched_teardown(struct request_queue *q)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
@@ -489,6 +461,40 @@ void blk_mq_sched_teardown(struct request_queue *q)
 		blk_mq_sched_free_tags(set, hctx, i);
 }
 
+int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+	int ret;
+
+	if (!e) {
+		q->elevator = NULL;
+		return 0;
+	}
+
+	/*
+	 * Default to 256, since we don't split into sync/async like the
+	 * old code did. Additionally, this is a per-hw queue depth.
+	 */
+	q->nr_requests = 2 * BLKDEV_MAX_RQ;
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		ret = blk_mq_sched_alloc_tags(q, hctx, i);
+		if (ret)
+			goto err;
+	}
+
+	ret = e->ops.mq.init_sched(q, e);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	blk_mq_sched_teardown(q);
+	return ret;
+}
+
 int blk_mq_sched_init(struct request_queue *q)
 {
 	int ret;
block/blk-mq-sched.h

@@ -32,7 +32,7 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
 			struct list_head *rq_list,
 			struct request *(*get_rq)(struct blk_mq_hw_ctx *));
 
-int blk_mq_sched_setup(struct request_queue *q);
+int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
 void blk_mq_sched_teardown(struct request_queue *q);
 
 int blk_mq_sched_init(struct request_queue *q);
block/elevator.c

@@ -242,17 +242,12 @@ int elevator_init(struct request_queue *q, char *name)
 		}
 	}
 
-	if (e->uses_mq) {
-		err = blk_mq_sched_setup(q);
-		if (!err)
-			err = e->ops.mq.init_sched(q, e);
-	} else
+	if (e->uses_mq)
+		err = blk_mq_init_sched(q, e);
+	else
 		err = e->ops.sq.elevator_init_fn(q, e);
-	if (err) {
-		if (e->uses_mq)
-			blk_mq_sched_teardown(q);
+	if (err)
 		elevator_put(e);
-	}
 	return err;
 }
 EXPORT_SYMBOL(elevator_init);
@@ -987,21 +982,18 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	}
 
 	/* allocate, init and register new elevator */
-	if (new_e) {
-		if (new_e->uses_mq) {
-			err = blk_mq_sched_setup(q);
-			if (!err)
-				err = new_e->ops.mq.init_sched(q, new_e);
-		} else
-			err = new_e->ops.sq.elevator_init_fn(q, new_e);
-		if (err)
-			goto fail_init;
+	if (q->mq_ops)
+		err = blk_mq_init_sched(q, new_e);
+	else
+		err = new_e->ops.sq.elevator_init_fn(q, new_e);
+	if (err)
+		goto fail_init;
 
+	if (new_e) {
 		err = elv_register_queue(q);
 		if (err)
 			goto fail_register;
-	} else
-		q->elevator = NULL;
+	}
 
 	/* done, kill the old one and finish */
 	if (old) {