sbitmap: fix batched wait_cnt accounting

Batched completions can clear multiple bits, but we're only decrementing
the wait_cnt by one each time. This can cause waiters to never be woken,
stalling IO. Use the batched count instead.

Link: https://bugzilla.kernel.org/show_bug.cgi?id=215679
Signed-off-by: Keith Busch <kbusch@kernel.org>
Link: https://lore.kernel.org/r/20220909184022.1709476-1-kbusch@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 4acb83417c
parent c35227d4e8

3 changed files with 26 additions and 16 deletions
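Why the old accounting stalls IO: each sbq_wait_state holds a wait_cnt that must reach zero before its batch of waiters is woken, and a batched completion clears several bits at once while (before this patch) ticking wait_cnt down by only one. The toy userspace model below is a minimal sketch of that arithmetic only; it is hypothetical code, not the kernel implementation, and WAKE_BATCH and the helper names are made up for illustration.

/* Toy model of the wait_cnt bookkeeping bug (hypothetical userspace C).
 * Build with: cc -o demo demo.c
 */
#include <stdio.h>

#define WAKE_BATCH 8	/* bits that must clear before waking waiters */

static int wait_cnt_old = WAKE_BATCH;
static int wait_cnt_new = WAKE_BATCH;

/* Old behaviour: one decrement per completion call, regardless of how
 * many bits the batched completion actually cleared. */
static void complete_batch_old(int nr_cleared)
{
	(void)nr_cleared;	/* batch size ignored: the bug */
	if (--wait_cnt_old == 0) {
		printf("old: wake waiters\n");
		wait_cnt_old = WAKE_BATCH;
	}
}

/* Fixed behaviour: account for every cleared bit. */
static void complete_batch_new(int nr_cleared)
{
	wait_cnt_new -= nr_cleared;
	if (wait_cnt_new <= 0) {
		printf("new: wake waiters\n");
		wait_cnt_new = WAKE_BATCH;
	}
}

int main(void)
{
	/* Two batched completions of 4 tags each clear 8 bits in total,
	 * a full wake batch. The old accounting has only counted 2 of
	 * them, so nobody is woken and waiters stall. */
	complete_batch_old(4);
	complete_batch_old(4);
	complete_batch_new(4);
	complete_batch_new(4);
	printf("old wait_cnt=%d (stuck), new wait_cnt=%d (reset after waking)\n",
	       wait_cnt_old, wait_cnt_new);
	return 0;
}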
				
			
block/blk-mq-tag.c

@@ -196,7 +196,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 		 * other allocations on previous queue won't be starved.
 		 */
 		if (bt != bt_prev)
-			sbitmap_queue_wake_up(bt_prev);
+			sbitmap_queue_wake_up(bt_prev, 1);
 
 		ws = bt_wait_ptr(bt, data->hctx);
 	} while (1);
include/linux/sbitmap.h

@@ -575,8 +575,9 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
  * sbitmap_queue_wake_up() - Wake up some of waiters in one waitqueue
  * on a &struct sbitmap_queue.
  * @sbq: Bitmap queue to wake up.
+ * @nr: Number of bits cleared.
  */
-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr);
 
 /**
  * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
lib/sbitmap.c

@@ -599,24 +599,31 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
 	return NULL;
 }
 
-static bool __sbq_wake_up(struct sbitmap_queue *sbq)
+static bool __sbq_wake_up(struct sbitmap_queue *sbq, int *nr)
 {
 	struct sbq_wait_state *ws;
 	unsigned int wake_batch;
-	int wait_cnt;
+	int wait_cnt, cur, sub;
 	bool ret;
 
+	if (*nr <= 0)
+		return false;
+
 	ws = sbq_wake_ptr(sbq);
 	if (!ws)
 		return false;
 
-	wait_cnt = atomic_dec_return(&ws->wait_cnt);
-	/*
-	 * For concurrent callers of this, callers should call this function
-	 * again to wakeup a new batch on a different 'ws'.
-	 */
-	if (wait_cnt < 0)
-		return true;
+	cur = atomic_read(&ws->wait_cnt);
+	do {
+		/*
+		 * For concurrent callers of this, callers should call this
+		 * function again to wakeup a new batch on a different 'ws'.
+		 */
+		if (cur == 0)
+			return true;
+		sub = min(*nr, cur);
+		wait_cnt = cur - sub;
+	} while (!atomic_try_cmpxchg(&ws->wait_cnt, &cur, wait_cnt));
 
 	/*
 	 * If we decremented queue without waiters, retry to avoid lost
@@ -625,6 +632,8 @@ static bool __sbq_wake_up(struct sbitmap_queue *sbq)
 	if (wait_cnt > 0)
 		return !waitqueue_active(&ws->wait);
 
+	*nr -= sub;
+
 	/*
 	 * When wait_cnt == 0, we have to be particularly careful as we are
 	 * responsible to reset wait_cnt regardless whether we've actually
@@ -660,12 +669,12 @@ static bool __sbq_wake_up(struct sbitmap_queue *sbq)
 	sbq_index_atomic_inc(&sbq->wake_index);
 	atomic_set(&ws->wait_cnt, wake_batch);
 
-	return ret;
+	return ret || *nr;
 }
 
-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
 {
-	while (__sbq_wake_up(sbq))
+	while (__sbq_wake_up(sbq, &nr))
 		;
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
@@ -705,7 +714,7 @@ void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
 		atomic_long_andnot(mask, (atomic_long_t *) addr);
 
 	smp_mb__after_atomic();
-	sbitmap_queue_wake_up(sbq);
+	sbitmap_queue_wake_up(sbq, nr_tags);
 	sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
 					tags[nr_tags - 1] - offset);
 }
@@ -733,7 +742,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
 	 * waiter. See the comment on waitqueue_active().
 	 */
 	smp_mb__after_atomic();
-	sbitmap_queue_wake_up(sbq);
+	sbitmap_queue_wake_up(sbq, 1);
 	sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
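The core of the new __sbq_wake_up() is the atomic_try_cmpxchg() loop, which subtracts min(*nr, cur) from ws->wait_cnt without ever driving it negative under concurrent callers. Below is a rough userspace analogue of just that pattern, a sketch under the assumption that C11 stdatomic is an acceptable stand-in: atomic_compare_exchange_weak(), like the kernel's atomic_try_cmpxchg(), rewrites the expected value on failure, so the loop recomputes the subtraction against the fresh counter. It is illustrative only, not the kernel implementation.

/* Userspace sketch (hypothetical) of the saturating atomic subtraction
 * applied to ws->wait_cnt.  Build with: cc -std=c11 -o cas cas.c
 */
#include <stdatomic.h>
#include <stdio.h>

static int sub_saturating(atomic_int *wait_cnt, int nr)
{
	int cur = atomic_load(wait_cnt);
	int sub, next;

	do {
		if (cur == 0)	/* another caller owns the batch reset */
			return 0;
		sub = nr < cur ? nr : cur;	/* min(nr, cur) */
		next = cur - sub;
		/* On failure, 'cur' is reloaded with the current value
		 * and the loop retries with a fresh computation. */
	} while (!atomic_compare_exchange_weak(wait_cnt, &cur, next));

	return sub;	/* cleared bits consumed against this counter */
}

int main(void)
{
	atomic_int wait_cnt = 8;

	/* A 5-bit batch, then a 6-bit batch: the second is clamped to
	 * the 3 remaining counts instead of underflowing. */
	printf("consumed %d\n", sub_saturating(&wait_cnt, 5)); /* 5 */
	printf("consumed %d\n", sub_saturating(&wait_cnt, 6)); /* 3 */
	printf("wait_cnt now %d\n", atomic_load(&wait_cnt));   /* 0 */
	return 0;
}

The clamping is also what makes the caller-side loop in the patch work: sbitmap_queue_wake_up() repeats while (__sbq_wake_up(sbq, &nr)), so each pass consumes part of nr against one wait queue's counter and moves on until every cleared bit has been accounted for.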