		
	net/smc: replace sock_put worker by socket refcounting
Proper socket refcounting makes the sock_put worker obsolete.

Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8dce2786a2
commit 51f1de79ad

6 changed files with 88 additions and 68 deletions
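The pattern applied throughout the patch is plain reference counting around deferred work: whoever schedules work on a socket first takes a reference with sock_hold(), the worker drops it with sock_put() when it has run, and the scheduler drops it itself if the work could not be queued. The fragment below is only a minimal sketch of that idiom with invented names (my_conn, my_close_work, my_schedule_close); it is not code taken from this commit.

#include <linux/workqueue.h>
#include <net/sock.h>

struct my_conn {
	struct sock		*sk;
	struct work_struct	close_work;
};

static void my_close_work(struct work_struct *work)
{
	struct my_conn *conn = container_of(work, struct my_conn, close_work);

	/* ... handle the passive close of conn->sk ... */

	sock_put(conn->sk);	/* reference taken by my_schedule_close() */
}

static void my_schedule_close(struct my_conn *conn)
{
	sock_hold(conn->sk);	/* keep sk alive until close_work has run */
	if (!schedule_work(&conn->close_work))
		sock_put(conn->sk);	/* already queued elsewhere, drop our extra hold */
}

With refcounting like this the socket is freed exactly when its last reference is dropped, so the delayed sock_put worker and its TCP_TIMEWAIT_LEN / SMC_CLOSE_SOCK_PUT_DELAY timers in the old code can be removed.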
				
			
net/smc/af_smc.c
@@ -115,7 +115,6 @@ static int smc_release(struct socket *sock)
 		goto out;
 
 	smc = smc_sk(sk);
-	sock_hold(sk);
 	if (sk->sk_state == SMC_LISTEN)
 		/* smc_close_non_accepted() is called and acquires
 		 * sock lock for child sockets again
@@ -124,10 +123,7 @@ static int smc_release(struct socket *sock)
 	else
 		lock_sock(sk);
 
-	if (smc->use_fallback) {
-		sk->sk_state = SMC_CLOSED;
-		sk->sk_state_change(sk);
-	} else {
+	if (!smc->use_fallback) {
 		rc = smc_close_active(smc);
 		sock_set_flag(sk, SOCK_DEAD);
 		sk->sk_shutdown |= SHUTDOWN_MASK;
@@ -136,20 +132,21 @@ static int smc_release(struct socket *sock)
 		sock_release(smc->clcsock);
 		smc->clcsock = NULL;
 	}
+	if (smc->use_fallback) {
+		sock_put(sk); /* passive closing */
+		sk->sk_state = SMC_CLOSED;
+		sk->sk_state_change(sk);
+	}
 
 	/* detach socket */
 	sock_orphan(sk);
 	sock->sk = NULL;
-	if (smc->use_fallback) {
-		schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
-	} else if (sk->sk_state == SMC_CLOSED) {
+	if (!smc->use_fallback && sk->sk_state == SMC_CLOSED)
 		smc_conn_free(&smc->conn);
-		schedule_delayed_work(&smc->sock_put_work,
-				      SMC_CLOSE_SOCK_PUT_DELAY);
-	}
 	release_sock(sk);
 
-	sock_put(sk);
+	sk->sk_prot->unhash(sk);
+	sock_put(sk); /* final sock_put */
 out:
 	return rc;
 }
@@ -181,7 +178,6 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock)
 	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
 	INIT_LIST_HEAD(&smc->accept_q);
 	spin_lock_init(&smc->accept_q_lock);
-	INIT_DELAYED_WORK(&smc->sock_put_work, smc_close_sock_put_work);
 	sk->sk_prot->hash(sk);
 	sk_refcnt_debug_inc(sk);
 
@@ -399,6 +395,8 @@ static int smc_connect_rdma(struct smc_sock *smc)
 	int rc = 0;
 	u8 ibport;
 
+	sock_hold(&smc->sk); /* sock put in passive closing */
+
 	if (!tcp_sk(smc->clcsock->sk)->syn_smc) {
 		/* peer has not signalled SMC-capability */
 		smc->use_fallback = true;
@@ -542,6 +540,8 @@ static int smc_connect_rdma(struct smc_sock *smc)
 	mutex_unlock(&smc_create_lgr_pending);
 	smc_conn_free(&smc->conn);
 out_err:
+	if (smc->sk.sk_state == SMC_INIT)
+		sock_put(&smc->sk); /* passive closing */
 	return rc;
 }
 
@@ -620,7 +620,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
 		new_sk->sk_state = SMC_CLOSED;
 		sock_set_flag(new_sk, SOCK_DEAD);
 		new_sk->sk_prot->unhash(new_sk);
-		sock_put(new_sk);
+		sock_put(new_sk); /* final */
 		*new_smc = NULL;
 		goto out;
 	}
@@ -637,7 +637,7 @@ static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
 {
 	struct smc_sock *par = smc_sk(parent);
 
-	sock_hold(sk);
+	sock_hold(sk); /* sock_put in smc_accept_unlink () */
 	spin_lock(&par->accept_q_lock);
 	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
 	spin_unlock(&par->accept_q_lock);
@@ -653,7 +653,7 @@ static void smc_accept_unlink(struct sock *sk)
 	list_del_init(&smc_sk(sk)->accept_q);
 	spin_unlock(&par->accept_q_lock);
 	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
-	sock_put(sk);
+	sock_put(sk); /* sock_hold in smc_accept_enqueue */
 }
 
 /* remove a sock from the accept queue to bind it to a new socket created
@@ -671,7 +671,7 @@ struct sock *smc_accept_dequeue(struct sock *parent,
 		smc_accept_unlink(new_sk);
 		if (new_sk->sk_state == SMC_CLOSED) {
 			new_sk->sk_prot->unhash(new_sk);
-			sock_put(new_sk);
+			sock_put(new_sk); /* final */
 			continue;
 		}
 		if (new_sock)
@@ -686,14 +686,11 @@ void smc_close_non_accepted(struct sock *sk)
 {
 	struct smc_sock *smc = smc_sk(sk);
 
-	sock_hold(sk);
 	lock_sock(sk);
 	if (!sk->sk_lingertime)
 		/* wait for peer closing */
 		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
-	if (smc->use_fallback) {
-		sk->sk_state = SMC_CLOSED;
-	} else {
+	if (!smc->use_fallback) {
 		smc_close_active(smc);
 		sock_set_flag(sk, SOCK_DEAD);
 		sk->sk_shutdown |= SHUTDOWN_MASK;
@@ -706,14 +703,15 @@ void smc_close_non_accepted(struct sock *sk)
 		sock_release(tcp);
 	}
 	if (smc->use_fallback) {
-		schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
-	} else if (sk->sk_state == SMC_CLOSED) {
-		smc_conn_free(&smc->conn);
-		schedule_delayed_work(&smc->sock_put_work,
-				      SMC_CLOSE_SOCK_PUT_DELAY);
+		sock_put(sk); /* passive closing */
+		sk->sk_state = SMC_CLOSED;
+	} else {
+		if (sk->sk_state == SMC_CLOSED)
+			smc_conn_free(&smc->conn);
 	}
 	release_sock(sk);
-	sock_put(sk);
+	sk->sk_prot->unhash(sk);
+	sock_put(sk); /* final sock_put */
 }
 
 static int smc_serv_conf_first_link(struct smc_sock *smc)
@@ -937,6 +935,8 @@ static void smc_listen_work(struct work_struct *work)
 		smc_lgr_forget(new_smc->conn.lgr);
 	mutex_unlock(&smc_create_lgr_pending);
 out_err:
+	if (newsmcsk->sk_state == SMC_INIT)
+		sock_put(&new_smc->sk); /* passive closing */
 	newsmcsk->sk_state = SMC_CLOSED;
 	smc_conn_free(&new_smc->conn);
 	goto enqueue; /* queue new sock with sk_err set */
@@ -963,12 +963,15 @@ static void smc_tcp_listen_work(struct work_struct *work)
 		sock_hold(lsk); /* sock_put in smc_listen_work */
 		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
 		smc_copy_sock_settings_to_smc(new_smc);
-		schedule_work(&new_smc->smc_listen_work);
+		sock_hold(&new_smc->sk); /* sock_put in passive closing */
+		if (!schedule_work(&new_smc->smc_listen_work))
+			sock_put(&new_smc->sk);
 	}
 
 out:
 	release_sock(lsk);
 	lsk->sk_data_ready(lsk); /* no more listening, wake accept */
+	sock_put(&lsmc->sk); /* sock_hold in smc_listen */
 }
 
 static int smc_listen(struct socket *sock, int backlog)
@@ -1002,7 +1005,9 @@ static int smc_listen(struct socket *sock, int backlog)
 	sk->sk_ack_backlog = 0;
 	sk->sk_state = SMC_LISTEN;
 	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
-	schedule_work(&smc->tcp_listen_work);
+	sock_hold(sk); /* sock_hold in tcp_listen_worker */
+	if (!schedule_work(&smc->tcp_listen_work))
+		sock_put(sk);
 
 out:
 	release_sock(sk);
@@ -1019,6 +1024,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
 	int rc = 0;
 
 	lsmc = smc_sk(sk);
+	sock_hold(sk); /* sock_put below */
 	lock_sock(sk);
 
 	if (lsmc->sk.sk_state != SMC_LISTEN) {
@@ -1053,6 +1059,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
 
 out:
 	release_sock(sk);
+	sock_put(sk); /* sock_hold above */
 	return rc;
 }
 

net/smc/smc.h
@@ -178,7 +178,6 @@ struct smc_sock {				/* smc sock container */
 	struct work_struct	smc_listen_work;/* prepare new accept socket */
 	struct list_head	accept_q;	/* sockets to be accepted */
 	spinlock_t		accept_q_lock;	/* protects accept_q */
-	struct delayed_work	sock_put_work;	/* final socket freeing */
 	bool			use_fallback;	/* fallback to tcp */
 	u8			wait_close_tx_prepared : 1;
 						/* shutdown wr or close

net/smc/smc_cdc.c
@@ -212,6 +212,14 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
 		smc->sk.sk_data_ready(&smc->sk);
 	}
 
+	/* piggy backed tx info */
+	/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
+	if (diff_cons && smc_tx_prepared_sends(conn)) {
+		smc_tx_sndbuf_nonempty(conn);
+		/* trigger socket release if connection closed */
+		smc_close_wake_tx_prepared(smc);
+	}
+
 	if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
 		smc->sk.sk_err = ECONNRESET;
 		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
@@ -221,15 +229,9 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
 		if (smc->clcsock && smc->clcsock->sk)
 			smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
 		sock_set_flag(&smc->sk, SOCK_DONE);
-		schedule_work(&conn->close_work);
-	}
-
-	/* piggy backed tx info */
-	/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
-	if (diff_cons && smc_tx_prepared_sends(conn)) {
-		smc_tx_sndbuf_nonempty(conn);
-		/* trigger socket release if connection closed */
-		smc_close_wake_tx_prepared(smc);
+		sock_hold(&smc->sk); /* sock_put in close_work */
+		if (!schedule_work(&conn->close_work))
+			sock_put(&smc->sk);
 	}
 }
 

net/smc/smc_close.c
@@ -110,6 +110,7 @@ static void smc_close_active_abort(struct smc_sock *smc)
 		release_sock(sk);
 		cancel_delayed_work_sync(&smc->conn.tx_work);
 		lock_sock(sk);
+		sock_put(sk); /* passive closing */
 		break;
 	case SMC_APPCLOSEWAIT1:
 	case SMC_APPCLOSEWAIT2:
@@ -125,11 +126,13 @@ static void smc_close_active_abort(struct smc_sock *smc)
 	case SMC_PEERCLOSEWAIT1:
 	case SMC_PEERCLOSEWAIT2:
 		if (!txflags->peer_conn_closed) {
+			/* just SHUTDOWN_SEND done */
 			sk->sk_state = SMC_PEERABORTWAIT;
 			sock_release(smc->clcsock);
 		} else {
 			sk->sk_state = SMC_CLOSED;
 		}
+		sock_put(sk); /* passive closing */
 		break;
 	case SMC_PROCESSABORT:
 	case SMC_APPFINCLOSEWAIT:
@@ -138,6 +141,8 @@ static void smc_close_active_abort(struct smc_sock *smc)
 		sk->sk_state = SMC_CLOSED;
 		break;
 	case SMC_PEERFINCLOSEWAIT:
+		sock_put(sk); /* passive closing */
+		break;
 	case SMC_PEERABORTWAIT:
 	case SMC_CLOSED:
 		break;
@@ -229,12 +234,14 @@ int smc_close_active(struct smc_sock *smc)
 		rc = smc_close_final(conn);
 		if (rc)
 			break;
-		if (smc_cdc_rxed_any_close(conn))
+		if (smc_cdc_rxed_any_close(conn)) {
 			/* peer has closed the socket already */
 			sk->sk_state = SMC_CLOSED;
-		else
+			sock_put(sk); /* postponed passive closing */
+		} else {
 			/* peer has just issued a shutdown write */
 			sk->sk_state = SMC_PEERFINCLOSEWAIT;
+		}
 		break;
 	case SMC_PEERCLOSEWAIT1:
 	case SMC_PEERCLOSEWAIT2:
@@ -272,27 +279,33 @@ static void smc_close_passive_abort_received(struct smc_sock *smc)
 	struct sock *sk = &smc->sk;
 
 	switch (sk->sk_state) {
+	case SMC_INIT:
 	case SMC_ACTIVE:
-	case SMC_APPFINCLOSEWAIT:
 	case SMC_APPCLOSEWAIT1:
-	case SMC_APPCLOSEWAIT2:
+		sk->sk_state = SMC_PROCESSABORT;
+		sock_put(sk); /* passive closing */
+		break;
+	case SMC_APPFINCLOSEWAIT:
 		sk->sk_state = SMC_PROCESSABORT;
 		break;
 	case SMC_PEERCLOSEWAIT1:
 	case SMC_PEERCLOSEWAIT2:
 		if (txflags->peer_done_writing &&
-		    !smc_close_sent_any_close(&smc->conn)) {
+		    !smc_close_sent_any_close(&smc->conn))
 			/* just shutdown, but not yet closed locally */
 			sk->sk_state = SMC_PROCESSABORT;
-		} else {
+		else
 			sk->sk_state = SMC_CLOSED;
-		}
+		sock_put(sk); /* passive closing */
 		break;
+	case SMC_APPCLOSEWAIT2:
 	case SMC_PEERFINCLOSEWAIT:
+		sk->sk_state = SMC_CLOSED;
+		sock_put(sk); /* passive closing */
+		break;
 	case SMC_PEERABORTWAIT:
 		sk->sk_state = SMC_CLOSED;
 		break;
-	case SMC_INIT:
 	case SMC_PROCESSABORT:
 	/* nothing to do, add tracing in future patch */
 		break;
@@ -336,13 +349,18 @@ static void smc_close_passive_work(struct work_struct *work)
 	case SMC_INIT:
 		if (atomic_read(&conn->bytes_to_rcv) ||
 		    (rxflags->peer_done_writing &&
-		     !smc_cdc_rxed_any_close(conn)))
+		     !smc_cdc_rxed_any_close(conn))) {
 			sk->sk_state = SMC_APPCLOSEWAIT1;
-		else
+		} else {
 			sk->sk_state = SMC_CLOSED;
+			sock_put(sk); /* passive closing */
+		}
 		break;
 	case SMC_ACTIVE:
 		sk->sk_state = SMC_APPCLOSEWAIT1;
+		/* postpone sock_put() for passive closing to cover
+		 * received SEND_SHUTDOWN as well
+		 */
 		break;
 	case SMC_PEERCLOSEWAIT1:
 		if (rxflags->peer_done_writing)
@@ -360,13 +378,20 @@ static void smc_close_passive_work(struct work_struct *work)
 			/* just shutdown, but not yet closed locally */
 			sk->sk_state = SMC_APPFINCLOSEWAIT;
 		}
+		sock_put(sk); /* passive closing */
 		break;
 	case SMC_PEERFINCLOSEWAIT:
-		if (smc_cdc_rxed_any_close(conn))
+		if (smc_cdc_rxed_any_close(conn)) {
 			sk->sk_state = SMC_CLOSED;
+			sock_put(sk); /* passive closing */
+		}
 		break;
 	case SMC_APPCLOSEWAIT1:
 	case SMC_APPCLOSEWAIT2:
+		/* postpone sock_put() for passive closing to cover
+		 * received SEND_SHUTDOWN as well
+		 */
+		break;
 	case SMC_APPFINCLOSEWAIT:
 	case SMC_PEERABORTWAIT:
 	case SMC_PROCESSABORT:
@@ -382,23 +407,11 @@ static void smc_close_passive_work(struct work_struct *work)
 	if (old_state != sk->sk_state) {
 		sk->sk_state_change(sk);
 		if ((sk->sk_state == SMC_CLOSED) &&
-		    (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
+		    (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket))
 			smc_conn_free(conn);
-			schedule_delayed_work(&smc->sock_put_work,
-					      SMC_CLOSE_SOCK_PUT_DELAY);
-		}
 	}
 	release_sock(sk);
-}
-
-void smc_close_sock_put_work(struct work_struct *work)
-{
-	struct smc_sock *smc = container_of(to_delayed_work(work),
-					    struct smc_sock,
-					    sock_put_work);
-
-	smc->sk.sk_prot->unhash(&smc->sk);
-	sock_put(&smc->sk);
+	sock_put(sk); /* sock_hold done by schedulers of close_work */
 }
 
 int smc_close_shutdown_write(struct smc_sock *smc)

net/smc/smc_close.h
@@ -21,7 +21,6 @@
 
 void smc_close_wake_tx_prepared(struct smc_sock *smc);
 int smc_close_active(struct smc_sock *smc);
-void smc_close_sock_put_work(struct work_struct *work);
 int smc_close_shutdown_write(struct smc_sock *smc);
 void smc_close_init(struct smc_sock *smc);
 

net/smc/smc_core.c
@@ -328,13 +328,13 @@ void smc_lgr_terminate(struct smc_link_group *lgr)
 	while (node) {
 		conn = rb_entry(node, struct smc_connection, alert_node);
 		smc = container_of(conn, struct smc_sock, conn);
-		sock_hold(&smc->sk);
+		sock_hold(&smc->sk); /* sock_put in close work */
 		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
 		__smc_lgr_unregister_conn(conn);
 		write_unlock_bh(&lgr->conns_lock);
-		schedule_work(&conn->close_work);
+		if (!schedule_work(&conn->close_work))
+			sock_put(&smc->sk);
 		write_lock_bh(&lgr->conns_lock);
-		sock_put(&smc->sk);
 		node = rb_first(&lgr->conns_all);
 	}
 	write_unlock_bh(&lgr->conns_lock);