mptcp: call tcp_cleanup_rbuf on subflows

That is needed to let the subflows announce promptly when new space
is available in the receive buffer.

tcp_cleanup_rbuf() is currently a static function, drop the scope
modifier and add a declaration in the TCP header.

Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
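For orientation only, the calling pattern this patch introduces looks roughly like the sketch below: once the MPTCP layer has consumed data that arrived via a subflow socket, it hands the freed receive space back to plain TCP so that a pending ACK or window update can go out promptly. This is not code from the patch; the helper name mptcp_announce_rcv_space() is made up, and the fast-lock usage mirrors the mptcp_rcv_space_adjust() hunk below.

#include <net/sock.h>
#include <net/tcp.h>

/* Illustrative helper (hypothetical, not part of the patch). */
static void mptcp_announce_rcv_space(struct sock *ssk, int copied)
{
	bool slow;

	if (!copied)
		return;

	/* tcp_cleanup_rbuf() needs the subflow socket locked; it may
	 * emit an ACK if the freed space warrants a window update.
	 */
	slow = lock_sock_fast(ssk);
	tcp_cleanup_rbuf(ssk, copied);
	unlock_sock_fast(ssk, slow);
}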
commit c76c695656
parent d5f49190de

4 changed files with 11 additions and 1 deletion
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1414,6 +1414,8 @@ static inline int tcp_full_space(const struct sock *sk)
 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
 }
 
+void tcp_cleanup_rbuf(struct sock *sk, int copied);
+
 /* We provision sk_rcvbuf around 200% of sk_rcvlowat.
  * If 87.5 % (7/8) of the space has been consumed, we want to override
  * SO_RCVLOWAT constraint, since we are receiving skbs with too small
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1527,7 +1527,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
  * calculation of whether or not we must ACK for the sake of
  * a window update.
  */
-static void tcp_cleanup_rbuf(struct sock *sk, int copied)
+void tcp_cleanup_rbuf(struct sock *sk, int copied)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	bool time_to_ack = false;
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -515,6 +515,8 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
 	} while (more_data_avail);
 
 	*bytes += moved;
+	if (moved)
+		tcp_cleanup_rbuf(ssk, moved);
 
 	return done;
 }
@@ -1424,10 +1426,14 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
 			 */
 			mptcp_for_each_subflow(msk, subflow) {
 				struct sock *ssk;
+				bool slow;
 
 				ssk = mptcp_subflow_tcp_sock(subflow);
+				slow = lock_sock_fast(ssk);
 				WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
 				tcp_sk(ssk)->window_clamp = window_clamp;
+				tcp_cleanup_rbuf(ssk, 1);
+				unlock_sock_fast(ssk, slow);
 			}
 		}
 	}
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -823,6 +823,8 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
 		sk_eat_skb(ssk, skb);
 	if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
 		subflow->map_valid = 0;
+	if (incr)
+		tcp_cleanup_rbuf(ssk, incr);
 }
 
 static bool subflow_check_data_avail(struct sock *ssk)