forked from mirrors/linux
		
	mptcp: handle correctly disconnect() failures
Currently the mptcp code assumes that disconnect() can fail only
at mptcp_sendmsg_fastopen() time - to avoid a deadlock scenario - and
doesn't even bother returning an error code.
Soon mptcp_disconnect() will handle more error conditions: let's track
them explicitly.
As a bonus, explicitly annotate TCP-level disconnect as not failing:
the mptcp code never blocks for events on the subflows.
Fixes: 7d803344fd ("mptcp: fix deadlock in fastopen error path")
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Tested-by: Christoph Paasch <cpaasch@apple.com>
Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
			
			
This commit is contained in:
		
							parent
							
								
									59bb14bda2
								
							
						
					
					
						commit
						c2b2ae3925
					
				
					 1 changed files with 14 additions and 6 deletions
				
			
		|  | @ -1727,7 +1727,13 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, | ||||||
| 		if (ret && ret != -EINPROGRESS && ret != -ERESTARTSYS && ret != -EINTR) | 		if (ret && ret != -EINPROGRESS && ret != -ERESTARTSYS && ret != -EINTR) | ||||||
| 			*copied_syn = 0; | 			*copied_syn = 0; | ||||||
| 	} else if (ret && ret != -EINPROGRESS) { | 	} else if (ret && ret != -EINPROGRESS) { | ||||||
| 		mptcp_disconnect(sk, 0); | 		/* The disconnect() op called by tcp_sendmsg_fastopen()/
 | ||||||
|  | 		 * __inet_stream_connect() can fail, due to a locking check, | ||||||
|  | 		 * see mptcp_disconnect(). | ||||||
|  | 		 * Attempt it again outside the problematic scope. | ||||||
|  | 		 */ | ||||||
|  | 		if (!mptcp_disconnect(sk, 0)) | ||||||
|  | 			sk->sk_socket->state = SS_UNCONNECTED; | ||||||
| 	} | 	} | ||||||
| 	inet_sk(sk)->defer_connect = 0; | 	inet_sk(sk)->defer_connect = 0; | ||||||
| 
 | 
 | ||||||
|  | @ -2389,7 +2395,10 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, | ||||||
| 
 | 
 | ||||||
| 	need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk); | 	need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk); | ||||||
| 	if (!dispose_it) { | 	if (!dispose_it) { | ||||||
| 		tcp_disconnect(ssk, 0); | 		/* The MPTCP code never waits on the subflow sockets, TCP-level
 | ||||||
|  | 		 * disconnect should never fail | ||||||
|  | 		 */ | ||||||
|  | 		WARN_ON_ONCE(tcp_disconnect(ssk, 0)); | ||||||
| 		msk->subflow->state = SS_UNCONNECTED; | 		msk->subflow->state = SS_UNCONNECTED; | ||||||
| 		mptcp_subflow_ctx_reset(subflow); | 		mptcp_subflow_ctx_reset(subflow); | ||||||
| 		release_sock(ssk); | 		release_sock(ssk); | ||||||
|  | @ -2812,7 +2821,7 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how) | ||||||
| 			break; | 			break; | ||||||
| 		fallthrough; | 		fallthrough; | ||||||
| 	case TCP_SYN_SENT: | 	case TCP_SYN_SENT: | ||||||
| 		tcp_disconnect(ssk, O_NONBLOCK); | 		WARN_ON_ONCE(tcp_disconnect(ssk, O_NONBLOCK)); | ||||||
| 		break; | 		break; | ||||||
| 	default: | 	default: | ||||||
| 		if (__mptcp_check_fallback(mptcp_sk(sk))) { | 		if (__mptcp_check_fallback(mptcp_sk(sk))) { | ||||||
|  | @ -3075,11 +3084,10 @@ static int mptcp_disconnect(struct sock *sk, int flags) | ||||||
| 
 | 
 | ||||||
| 	/* We are on the fastopen error path. We can't call straight into the
 | 	/* We are on the fastopen error path. We can't call straight into the
 | ||||||
| 	 * subflows cleanup code due to lock nesting (we are already under | 	 * subflows cleanup code due to lock nesting (we are already under | ||||||
| 	 * msk->firstsocket lock). Do nothing and leave the cleanup to the | 	 * msk->firstsocket lock). | ||||||
| 	 * caller. |  | ||||||
| 	 */ | 	 */ | ||||||
| 	if (msk->fastopening) | 	if (msk->fastopening) | ||||||
| 		return 0; | 		return -EBUSY; | ||||||
| 
 | 
 | ||||||
| 	mptcp_listen_inuse_dec(sk); | 	mptcp_listen_inuse_dec(sk); | ||||||
| 	inet_sk_state_store(sk, TCP_CLOSE); | 	inet_sk_state_store(sk, TCP_CLOSE); | ||||||
|  |  | ||||||
		Loading…
	
		Reference in a new issue
	
	 Paolo Abeni
						Paolo Abeni