blk-mq: introduce blk_mq_delay_kick_requeue_list()

blk_mq_delay_kick_requeue_list() provides the ability to kick the
q->requeue_list after a specified time.  To do this the request_queue's
'requeue_work' member was changed to a delayed_work.

blk_mq_delay_kick_requeue_list() allows DM to defer processing requeued
requests while it doesn't make sense to immediately requeue them
(e.g. when all paths in a DM multipath have failed).

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
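
As a usage sketch (not part of this commit), a driver holding a failed
request could park it on the requeue list and ask for deferred processing
instead of kicking the list right away. The function name
example_requeue_later and the 100 ms delay below are illustrative
assumptions; blk_mq_requeue_request() and blk_mq_delay_kick_requeue_list()
match the APIs as of this patch:

	/*
	 * Hypothetical caller (illustrative only, not from this commit):
	 * requeue a failed request, then process q->requeue_list after a
	 * delay rather than immediately, e.g. while no multipath path is
	 * usable.  The 100 ms value is an arbitrary example.
	 */
	#include <linux/blkdev.h>
	#include <linux/blk-mq.h>

	static void example_requeue_later(struct request *rq)
	{
		struct request_queue *q = rq->q;

		blk_mq_requeue_request(rq);		/* parks rq on q->requeue_list */
		blk_mq_delay_kick_requeue_list(q, 100);	/* kick the list in ~100 ms */
	}

Passing a delay of 0, as blk_mq_kick_requeue_list() now does internally,
schedules the work immediately, so the delayed path is a strict
generalization of the old behavior.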
parent c5c5ca7774
commit 2849450ad3

3 changed files with 14 additions and 5 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -502,7 +502,7 @@ EXPORT_SYMBOL(blk_mq_requeue_request);
 static void blk_mq_requeue_work(struct work_struct *work)
 {
 	struct request_queue *q =
-		container_of(work, struct request_queue, requeue_work);
+		container_of(work, struct request_queue, requeue_work.work);
 	LIST_HEAD(rq_list);
 	struct request *rq, *next;
 	unsigned long flags;
@@ -557,16 +557,24 @@ EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
 
 void blk_mq_cancel_requeue_work(struct request_queue *q)
 {
-	cancel_work_sync(&q->requeue_work);
+	cancel_delayed_work_sync(&q->requeue_work);
 }
 EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
 
 void blk_mq_kick_requeue_list(struct request_queue *q)
 {
-	kblockd_schedule_work(&q->requeue_work);
+	kblockd_schedule_delayed_work(&q->requeue_work, 0);
 }
 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 
+void blk_mq_delay_kick_requeue_list(struct request_queue *q,
+				    unsigned long msecs)
+{
+	kblockd_schedule_delayed_work(&q->requeue_work,
+				      msecs_to_jiffies(msecs));
+}
+EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
+
 void blk_mq_abort_requeue_list(struct request_queue *q)
 {
 	unsigned long flags;
@@ -2084,7 +2092,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 
 	q->sg_reserved_size = INT_MAX;
 
-	INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
+	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
 	INIT_LIST_HEAD(&q->requeue_list);
 	spin_lock_init(&q->requeue_lock);
 
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -233,6 +233,7 @@ void blk_mq_requeue_request(struct request *rq);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
 void blk_mq_cancel_requeue_work(struct request_queue *q);
 void blk_mq_kick_requeue_list(struct request_queue *q);
+void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 void blk_mq_abort_requeue_list(struct request_queue *q);
 void blk_mq_complete_request(struct request *rq, int error);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -449,7 +449,7 @@ struct request_queue {
 
 	struct list_head	requeue_list;
 	spinlock_t		requeue_lock;
-	struct work_struct	requeue_work;
+	struct delayed_work	requeue_work;
 
 	struct mutex		sysfs_lock;
 
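
One detail worth spelling out: the first hunk switches container_of() to
name requeue_work.work because struct delayed_work embeds a plain
struct work_struct (plus a timer), and the workqueue core invokes
blk_mq_requeue_work() with a pointer to that embedded member. Below is a
minimal, userspace-compilable sketch of the pointer arithmetic; the
_sketch types and field contents stand in for the real kernel structures:

	#include <stddef.h>
	#include <stdio.h>

	/* Stand-ins for the kernel types, reduced to what the layout needs. */
	struct work_struct { int pending; };
	struct delayed_work { struct work_struct work; /* kernel adds a timer */ };

	struct request_queue_sketch {
		struct delayed_work requeue_work;	/* was: struct work_struct */
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	int main(void)
	{
		struct request_queue_sketch q;
		/* The work callback receives the embedded work_struct... */
		struct work_struct *cb_arg = &q.requeue_work.work;
		/* ...so the queue is recovered exactly as the patched
		 * blk_mq_requeue_work() does: via requeue_work.work. */
		struct request_queue_sketch *qp =
			container_of(cb_arg, struct request_queue_sketch,
				     requeue_work.work);

		printf("%d\n", qp == &q);	/* prints 1 */
		return 0;
	}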