mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	nvme-tcp: fix timeout handler
Currently, we have several problems with the timeout handler: 1. If we timeout on the controller establishment flow, we will hang because we don't execute the error recovery (and we shouldn't because the create_ctrl flow needs to fail and cleanup on its own) 2. We might also hang if we get a disconnect on a queue while the controller is already deleting. This racy flow can cause the controller disable/shutdown admin command to hang. We cannot complete a timed-out request from the timeout handler without mutual exclusion from the teardown flow (e.g. nvme_rdma_error_recovery_work). So we serialize it in the timeout handler and teardown io and admin queues to guarantee that no one races with us from completing the request. Reviewed-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Sagi Grimberg <sagi@grimberg.me> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
		
							parent
							
								
									4c174e6366
								
							
						
					
					
						commit
						39d5775746
					
				
					 1 changed file with 11 additions and 8 deletions
				
			
		| 
						 | 
					@ -1948,20 +1948,23 @@ nvme_tcp_timeout(struct request *rq, bool reserved)
 | 
				
			||||||
	struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
 | 
						struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
 | 
				
			||||||
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
 | 
						struct nvme_tcp_cmd_pdu *pdu = req->pdu;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	dev_dbg(ctrl->ctrl.device,
 | 
						dev_warn(ctrl->ctrl.device,
 | 
				
			||||||
		"queue %d: timeout request %#x type %d\n",
 | 
							"queue %d: timeout request %#x type %d\n",
 | 
				
			||||||
		nvme_tcp_queue_id(req->queue), rq->tag,
 | 
							nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
 | 
				
			||||||
		pdu->hdr.type);
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
 | 
						if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
 | 
				
			||||||
		union nvme_result res = {};
 | 
							/*
 | 
				
			||||||
 | 
							 * Teardown immediately if controller times out while starting
 | 
				
			||||||
		nvme_req(rq)->flags |= NVME_REQ_CANCELLED;
 | 
							 * or we are already started error recovery. all outstanding
 | 
				
			||||||
		nvme_end_request(rq, cpu_to_le16(NVME_SC_ABORT_REQ), res);
 | 
							 * requests are completed on shutdown, so we return BLK_EH_DONE.
 | 
				
			||||||
 | 
							 */
 | 
				
			||||||
 | 
							flush_work(&ctrl->err_work);
 | 
				
			||||||
 | 
							nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
 | 
				
			||||||
 | 
							nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
 | 
				
			||||||
		return BLK_EH_DONE;
 | 
							return BLK_EH_DONE;
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/* queue error recovery */
 | 
						dev_warn(ctrl->ctrl.device, "starting error recovery\n");
 | 
				
			||||||
	nvme_tcp_error_recovery(&ctrl->ctrl);
 | 
						nvme_tcp_error_recovery(&ctrl->ctrl);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	return BLK_EH_RESET_TIMER;
 | 
						return BLK_EH_RESET_TIMER;
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue