Mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 10:40:15 +02:00)
SUNRPC: Remove the bh-safe lock requirement on the rpc_wait_queue->lock

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>

commit c049f8ea9a (parent b5e924191f)

1 changed file with 25 additions and 25 deletions
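The change is mechanical throughout the file: every acquisition of the rpc_wait_queue lock switches from the bottom-half-disabling spin_lock_bh()/spin_unlock_bh() pair to the plain spin_lock()/spin_unlock() pair. As a minimal sketch of that pattern (illustrative only, not taken from the patch; the names example_queue_lock, take_queue_lock_before() and take_queue_lock_after() are invented here):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_queue_lock);	/* stand-in for rpc_wait_queue->lock */

static void take_queue_lock_before(void)	/* old pattern */
{
	spin_lock_bh(&example_queue_lock);	/* also disables softirqs on this CPU */
	/* ... manipulate the wait queue ... */
	spin_unlock_bh(&example_queue_lock);	/* re-enables softirqs */
}

static void take_queue_lock_after(void)		/* new pattern */
{
	spin_lock(&example_queue_lock);		/* plain spinlock; softirqs stay enabled */
	/* ... manipulate the wait queue ... */
	spin_unlock(&example_queue_lock);
}

Dropping the _bh variants is only safe if the lock is never taken from softirq context; otherwise a softirq firing on a CPU that already holds the lock could deadlock.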
				
			
@@ -432,9 +432,9 @@ void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
 	/*
 	 * Protect the queue operations.
 	 */
-	spin_lock_bh(&q->lock);
+	spin_lock(&q->lock);
 	__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
-	spin_unlock_bh(&q->lock);
+	spin_unlock(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);
 
@@ -450,9 +450,9 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 	/*
 	 * Protect the queue operations.
 	 */
-	spin_lock_bh(&q->lock);
+	spin_lock(&q->lock);
 	__rpc_sleep_on_priority(q, task, task->tk_priority);
-	spin_unlock_bh(&q->lock);
+	spin_unlock(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on);
 
@@ -466,9 +466,9 @@ void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
 	/*
 	 * Protect the queue operations.
 	 */
-	spin_lock_bh(&q->lock);
+	spin_lock(&q->lock);
 	__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
-	spin_unlock_bh(&q->lock);
+	spin_unlock(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);
 
@@ -483,9 +483,9 @@ void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
 	/*
 	 * Protect the queue operations.
 	 */
-	spin_lock_bh(&q->lock);
+	spin_lock(&q->lock);
 	__rpc_sleep_on_priority(q, task, priority);
-	spin_unlock_bh(&q->lock);
+	spin_unlock(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
 
@@ -563,9 +563,9 @@ void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
 {
 	if (!RPC_IS_QUEUED(task))
 		return;
-	spin_lock_bh(&queue->lock);
+	spin_lock(&queue->lock);
 	rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
 }
 
 /*
@@ -575,9 +575,9 @@ void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task
 {
 	if (!RPC_IS_QUEUED(task))
 		return;
-	spin_lock_bh(&queue->lock);
+	spin_lock(&queue->lock);
 	rpc_wake_up_task_queue_locked(queue, task);
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
 
@@ -610,9 +610,9 @@ rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
 {
 	if (!RPC_IS_QUEUED(task))
 		return;
-	spin_lock_bh(&queue->lock);
+	spin_lock(&queue->lock);
 	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
 }
 
 /*
@@ -675,12 +675,12 @@ struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
 
 	dprintk("RPC:       wake_up_first(%p \"%s\")\n",
 			queue, rpc_qname(queue));
-	spin_lock_bh(&queue->lock);
+	spin_lock(&queue->lock);
 	task = __rpc_find_next_queued(queue);
 	if (task != NULL)
 		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
 				task, func, data);
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
 
 	return task;
 }
@@ -719,7 +719,7 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
 {
 	struct list_head *head;
 
-	spin_lock_bh(&queue->lock);
+	spin_lock(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
 		while (!list_empty(head)) {
@@ -733,7 +733,7 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
 			break;
 		head--;
 	}
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_wake_up);
 
@@ -748,7 +748,7 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 {
 	struct list_head *head;
 
-	spin_lock_bh(&queue->lock);
+	spin_lock(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
 		while (!list_empty(head)) {
@@ -763,7 +763,7 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 			break;
 		head--;
 	}
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_wake_up_status);
 
@@ -775,7 +775,7 @@ static void __rpc_queue_timer_fn(struct work_struct *work)
 	struct rpc_task *task, *n;
 	unsigned long expires, now, timeo;
 
-	spin_lock_bh(&queue->lock);
+	spin_lock(&queue->lock);
 	expires = now = jiffies;
 	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
 		timeo = task->tk_timeout;
@@ -790,7 +790,7 @@ static void __rpc_queue_timer_fn(struct work_struct *work)
 	}
 	if (!list_empty(&queue->timer_list.list))
 		rpc_set_queue_timer(queue, expires);
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
 }
 
 static void __rpc_atrun(struct rpc_task *task)
@@ -937,13 +937,13 @@ static void __rpc_execute(struct rpc_task *task)
 		 * rpc_task pointer may still be dereferenced.
 		 */
 		queue = task->tk_waitqueue;
-		spin_lock_bh(&queue->lock);
+		spin_lock(&queue->lock);
 		if (!RPC_IS_QUEUED(task)) {
-			spin_unlock_bh(&queue->lock);
+			spin_unlock(&queue->lock);
 			continue;
 		}
 		rpc_clear_running(task);
-		spin_unlock_bh(&queue->lock);
+		spin_unlock(&queue->lock);
 		if (task_is_async)
 			return;
 
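A hint as to why the plain lock is now sufficient is visible in the hunk at line 775 above: __rpc_queue_timer_fn() takes a struct work_struct *, so the queue timer already runs from a workqueue, i.e. in process context rather than in a (soft)irq timer callback. A hedged sketch of that arrangement (the struct and function names below are invented for illustration, not the actual SUNRPC definitions):

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_wait_queue {			/* stand-in for struct rpc_wait_queue */
	spinlock_t		lock;
	struct delayed_work	timer_work;	/* queue timeout handling as delayed work */
};

static void example_queue_timer_fn(struct work_struct *work)
{
	struct example_wait_queue *q =
		container_of(work, struct example_wait_queue, timer_work.work);

	spin_lock(&q->lock);		/* workqueue callback: process context, no _bh needed */
	/* ... wake up tasks whose timeout has expired ... */
	spin_unlock(&q->lock);
}

With every user of the lock running in process context (task execution, wake-ups and the timer work item), disabling bottom halves around these critical sections buys nothing and only adds softirq latency, which is presumably what this commit removes.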