Mirror of https://github.com/torvalds/linux.git, synced 2025-11-04 10:40:15 +02:00

	net/9p/virtio: Fix hard lockup in req_done
When a client has multiple threads issuing I/O requests all the time and the server performs very well, the CPU may end up running in IRQ context for a long time, because the *while* loop keeps finding buffers in the virtqueue. So we should hold chan->lock across the whole loop.

[ Dominique: reworded subject line ]

Link: http://lkml.kernel.org/r/5B503AEC.5080404@huawei.com
Signed-off-by: Yiwen Jiang <jiangyiwen@huawei.com>
To: Andrew Morton <akpm@linux-foundation.org>
To: Eric Van Hensbergen <ericvh@gmail.com>
To: Ron Minnich <rminnich@sandia.gov>
To: Latchesar Ionkov <lucho@ionkov.net>
Signed-off-by: Dominique Martinet <dominique.martinet@cea.fr>
This commit is contained in:

parent c7ebbae7cf
commit 31934da810

1 changed file with 11 additions and 10 deletions
@@ -144,24 +144,25 @@ static void req_done(struct virtqueue *vq)
 	struct virtio_chan *chan = vq->vdev->priv;
 	unsigned int len;
 	struct p9_req_t *req;
+	bool need_wakeup = false;
 	unsigned long flags;
 
 	p9_debug(P9_DEBUG_TRANS, ": request done\n");
 
-	while (1) {
-		spin_lock_irqsave(&chan->lock, flags);
-		req = virtqueue_get_buf(chan->vq, &len);
-		if (req == NULL) {
-			spin_unlock_irqrestore(&chan->lock, flags);
-			break;
+	spin_lock_irqsave(&chan->lock, flags);
+	while ((req = virtqueue_get_buf(chan->vq, &len)) != NULL) {
+		if (!chan->ring_bufs_avail) {
+			chan->ring_bufs_avail = 1;
+			need_wakeup = true;
 		}
-		chan->ring_bufs_avail = 1;
-		spin_unlock_irqrestore(&chan->lock, flags);
-		/* Wakeup if anyone waiting for VirtIO ring space. */
-		wake_up(chan->vc_wq);
+
 		if (len)
 			p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
 	}
+	spin_unlock_irqrestore(&chan->lock, flags);
+	/* Wakeup if anyone waiting for VirtIO ring space. */
+	if (need_wakeup)
+		wake_up(chan->vc_wq);
 }
 
 /**
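The locking pattern the patch moves to, drain every completed buffer under a single lock acquisition and defer the waiter wakeup until after the lock is released, can be illustrated outside the kernel. Below is a minimal userspace sketch using POSIX threads; the structure and names (struct ring, get_buf, complete, space_cond) are invented for illustration and are not part of the 9p or virtio code.

/*
 * Minimal userspace analogue of the req_done() restructuring: take the lock
 * once, drain every completed buffer, remember whether waiters need a wakeup,
 * and signal them only after the lock has been released.  All names here are
 * illustrative; this is not kernel code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct ring {
	pthread_mutex_t lock;
	pthread_cond_t  space_cond;  /* waiters needing ring space */
	bool            bufs_avail;  /* mirrors chan->ring_bufs_avail */
	void *(*get_buf)(struct ring *r, unsigned int *len);  /* stands in for virtqueue_get_buf() */
	void  (*complete)(void *req, unsigned int len);       /* stands in for p9_client_cb() */
};

static void ring_done(struct ring *r)
{
	bool need_wakeup = false;
	unsigned int len;
	void *req;

	pthread_mutex_lock(&r->lock);
	/* Drain all completed buffers under a single lock acquisition. */
	while ((req = r->get_buf(r, &len)) != NULL) {
		if (!r->bufs_avail) {
			r->bufs_avail = true;
			need_wakeup = true;  /* defer the wakeup until the lock is dropped */
		}
		if (len)
			r->complete(req, len);
	}
	pthread_mutex_unlock(&r->lock);

	/* One wakeup per drain, outside the lock, instead of one per buffer. */
	if (need_wakeup)
		pthread_cond_broadcast(&r->space_cond);
}

Waking waiters only once per drain, after the unlock, matches what the diff does with need_wakeup and wake_up(chan->vc_wq), whereas the old code issued a wakeup on every loop iteration.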