mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 10:40:15 +02:00)
	net/9p/trans_fd.c: fix race by holding the lock
It may be possible to run p9_fd_cancel() with a deleted req->req_list and incur a double del. To fix, hold the client->lock while changing the status, so that the other threads are synchronized.

Link: http://lkml.kernel.org/r/20180723184253.6682-1-tomasbortoli@gmail.com
Signed-off-by: Tomas Bortoli <tomasbortoli@gmail.com>
Reported-by: syzbot+735d926e9d1317c3310c@syzkaller.appspotmail.com
To: Eric Van Hensbergen <ericvh@gmail.com>
To: Ron Minnich <rminnich@sandia.gov>
To: Latchesar Ionkov <lucho@ionkov.net>
Cc: Yiwen Jiang <jiangyiwen@huwei.com>
Cc: David S. Miller <davem@davemloft.net>
Signed-off-by: Dominique Martinet <dominique.martinet@cea.fr>
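The rule the commit enforces is that the completion path and the cancel path must remove a request from its list and publish its new status under the same client->lock, so neither side can see a request whose req_list entry was already deleted. The following is a minimal userspace sketch of that discipline only; all demo_* names are hypothetical, a pthread mutex stands in for the kernel spinlock, and it is not the kernel code itself.

/* Sketch of the locking discipline: list removal and status update
 * happen under the same lock the cancel path takes. All demo_* names
 * are hypothetical; a pthread mutex stands in for client->lock. */
#include <pthread.h>
#include <stdio.h>

enum req_status { REQ_STATUS_SENT, REQ_STATUS_RCVD, REQ_STATUS_ERROR };

struct demo_req {
	struct demo_req *prev, *next;   /* doubly linked, like req->req_list */
	enum req_status status;
};

struct demo_client {
	pthread_mutex_t lock;           /* plays the role of client->lock */
	struct demo_req reqs;           /* list head of in-flight requests */
};

/* unlink and self-link, so the request is never left half-removed */
static void demo_list_del(struct demo_req *req)
{
	req->prev->next = req->next;
	req->next->prev = req->prev;
	req->prev = req->next = req;
}

/* completion path: unlink the request and set its status under the lock */
static void demo_read_work(struct demo_client *c, struct demo_req *req,
			   enum req_status status)
{
	pthread_mutex_lock(&c->lock);
	demo_list_del(req);
	req->status = status;           /* updated while holding the lock */
	pthread_mutex_unlock(&c->lock);
}

/* cancel path: under the same lock it sees either "still pending"
 * (safe to unlink) or "already completed" (leave it alone), never a
 * request whose list entry was deleted concurrently. */
static void demo_cancel(struct demo_client *c, struct demo_req *req)
{
	pthread_mutex_lock(&c->lock);
	if (req->status == REQ_STATUS_SENT) {
		demo_list_del(req);
		req->status = REQ_STATUS_ERROR;
	}
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct demo_client c = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct demo_req r = { .status = REQ_STATUS_SENT };

	/* link the single request into the client's list */
	c.reqs.prev = c.reqs.next = &r;
	r.prev = r.next = &c.reqs;

	demo_read_work(&c, &r, REQ_STATUS_RCVD); /* completes the request   */
	demo_cancel(&c, &r);                     /* no-op, not a double del */
	printf("final status: %d\n", r.status);
	return 0;
}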
This commit is contained in:
parent 430ac66eb4
commit 9f476d7c54
1 changed file with 5 additions and 5 deletions
net/9p/trans_fd.c
@@ -199,15 +199,14 @@ static void p9_mux_poll_stop(struct p9_conn *m)
 static void p9_conn_cancel(struct p9_conn *m, int err)
 {
 	struct p9_req_t *req, *rtmp;
-	unsigned long flags;
 	LIST_HEAD(cancel_list);
 
 	p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
 
-	spin_lock_irqsave(&m->client->lock, flags);
+	spin_lock(&m->client->lock);
 
 	if (m->err) {
-		spin_unlock_irqrestore(&m->client->lock, flags);
+		spin_unlock(&m->client->lock);
 		return;
 	}
 
@@ -219,7 +218,6 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
 	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
 		list_move(&req->req_list, &cancel_list);
 	}
-	spin_unlock_irqrestore(&m->client->lock, flags);
 
 	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
 		p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
@@ -228,6 +226,7 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
 			req->t_err = err;
 		p9_client_cb(m->client, req, REQ_STATUS_ERROR);
 	}
+	spin_unlock(&m->client->lock);
 }
 
 static __poll_t
@@ -375,8 +374,9 @@ static void p9_read_work(struct work_struct *work)
 		if (m->req->status != REQ_STATUS_ERROR)
 			status = REQ_STATUS_RCVD;
 		list_del(&m->req->req_list);
-		spin_unlock(&m->client->lock);
+		/* update req->status while holding client->lock  */
 		p9_client_cb(m->client, m->req, status);
+		spin_unlock(&m->client->lock);
 		m->rc.sdata = NULL;
 		m->rc.offset = 0;
 		m->rc.capacity = 0;