	[TCP]: Reduce sacked_out with reno when purging write_queue
Previously TCP had a transitional state during which reno counted
segments that were already below the current window into sacked_out;
this is now prevented. In addition, the unconditional S+L skb catching
is retried.

This approach conservatively calls just remove_sack and leaves the
reset_sack() calls alone. The best solution to the whole problem would
be to first calculate the new sacked_out fully (this patch does not
move the reno_sack_reset calls from their original sites and thus does
not implement that). However, it would require a very invasive change
to fastretrans_alert (perhaps even splitting it into two halves).
Alternatively, all callers of tcp_packets_in_flight (i.e., users that
depend on sacked_out) could be postponed until the new sacked_out has
been calculated, but that is no simpler an alternative.

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
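To see why stale sacked_out matters, recall the flight-size arithmetic
the message refers to. The following is a paraphrase of the
tcp_left_out()/tcp_packets_in_flight() helpers from include/net/tcp.h
of this era, not a verbatim quote:

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;		/* "S + L" */
}

static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	/* Underestimates the true flight size whenever sacked_out still
	 * counts segments that have already left the write queue. */
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}

If sacked_out still counts purged segments, tcp_left_out() can exceed
packets_out, which is exactly the condition that the now-unconditional
tcp_verify_left_out() BUG_ON catches.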
commit 1b6d427bb7
parent d02596e329

2 changed files with 8 additions and 10 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h
@@ -759,8 +759,7 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 }
 
 /* Use define here intentionally to get BUG_ON location shown at the caller */
-#define tcp_verify_left_out(tp) \
-	BUG_ON(tp->rx_opt.sack_ok && (tcp_left_out(tp) > tp->packets_out))
+#define tcp_verify_left_out(tp)	BUG_ON(tcp_left_out(tp) > tp->packets_out)
 
 extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
 extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
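The comment kept above the define explains the choice of a macro over
an inline function: BUG_ON() captures __FILE__/__LINE__ at its
expansion site. A standalone C illustration of that distinction
(hypothetical names, not kernel code):

#include <stdio.h>

/* In a macro, __FILE__/__LINE__ expand at the use site, so each caller
 * reports its own location. */
#define VERIFY(cond) \
	do { if (!(cond)) printf("BUG at %s:%d\n", __FILE__, __LINE__); } while (0)

/* In a function, they expand here, so every caller reports this line. */
static void verify_fn(int cond)
{
	if (!cond)
		printf("BUG at %s:%d\n", __FILE__, __LINE__);
}

int main(void)
{
	VERIFY(0);	/* prints main()'s line number */
	verify_fn(0);	/* prints verify_fn()'s line number, whoever calls it */
	return 0;
}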
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
@@ -2187,7 +2187,7 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
  * tcp_xmit_retransmit_queue().
  */
 static void
-tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag)
+tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -2273,12 +2273,8 @@ tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag)
 		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
 			if (IsReno(tp) && is_dupack)
 				tcp_add_reno_sack(sk);
-		} else {
-			int acked = prior_packets - tp->packets_out;
-			if (IsReno(tp))
-				tcp_remove_reno_sacks(sk, acked);
-			do_lost = tcp_try_undo_partial(sk, acked);
-		}
+		} else
+			do_lost = tcp_try_undo_partial(sk, pkts_acked);
 		break;
 	case TCP_CA_Loss:
 		if (flag&FLAG_DATA_ACKED)
@@ -2577,6 +2573,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 		tcp_ack_update_rtt(sk, acked, seq_rtt);
 		tcp_ack_packets_out(sk);
 
+		if (IsReno(tp))
+			tcp_remove_reno_sacks(sk, pkts_acked);
+
 		if (ca_ops->pkts_acked) {
 			s32 rtt_us = -1;
 
@@ -2927,7 +2926,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
 		    tcp_may_raise_cwnd(sk, flag))
 			tcp_cong_avoid(sk, ack, prior_in_flight, 0);
-		tcp_fastretrans_alert(sk, prior_packets, flag);
+		tcp_fastretrans_alert(sk, prior_packets - tp->packets_out, flag);
 	} else {
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
 			tcp_cong_avoid(sk, ack, prior_in_flight, 1);
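The tcp_input.c side of the change moves the reno sacked_out reduction
into tcp_clean_rtx_queue(), i.e., it now happens while the write queue
is being purged, before tcp_fastretrans_alert() runs. A minimal,
self-contained model of that bookkeeping (hypothetical struct and
helper names, simplified from the kernel logic):

#include <assert.h>
#include <stdio.h>

/* Simplified model -- field names mirror struct tcp_sock, but this is
 * not kernel code. */
struct tp_model {
	unsigned int packets_out;	/* segments still in the write queue */
	unsigned int sacked_out;	/* reno: duplicate-ACK counter */
	unsigned int lost_out;
};

/* The invariant made unconditional by this patch: S+L may never exceed
 * the number of packets actually outstanding. */
static void verify_left_out(const struct tp_model *tp)
{
	assert(tp->sacked_out + tp->lost_out <= tp->packets_out);
}

/* Reno "removes sacks" by clamping the dup-ACK counter as acked
 * segments are purged, keeping the invariant intact. */
static void reno_remove_sacks(struct tp_model *tp, unsigned int acked)
{
	tp->sacked_out -= (acked < tp->sacked_out) ? acked : tp->sacked_out;
	verify_left_out(tp);
}

int main(void)
{
	struct tp_model tp = { .packets_out = 10, .sacked_out = 3, .lost_out = 0 };

	/* A cumulative ACK purges 8 segments from the write queue. */
	unsigned int acked = 8;
	tp.packets_out -= acked;

	/* Without this step sacked_out (3) would exceed packets_out (2)
	 * and verify_left_out() would trigger. */
	reno_remove_sacks(&tp, acked);

	printf("packets_out=%u sacked_out=%u\n", tp.packets_out, tp.sacked_out);
	return 0;
}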