tcp: mitigate ACK loops for connections as tcp_sock

Ensure that in state ESTABLISHED, where the connection is represented by a
tcp_sock, we rate limit dupacks in response to incoming packets (a) with
TCP timestamps that fail PAWS checks, or (b) with sequence numbers or ACK
numbers that are out of the acceptable window.

We do not send a dupack in response to out-of-window packets if it has been
less than sysctl_tcp_invalid_ratelimit (default 500ms) since we last sent
a dupack in response to an out-of-window packet.

There is already a similar (although global) rate-limiting mechanism for
"challenge ACKs". When deciding whether to send a challenge ACK, we first
consult the new per-connection rate limit, and then the global rate limit.

Reported-by: Avery Fay <avery@mixpanel.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
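To make the timing rule concrete, below is a minimal, self-contained userspace
sketch of the decision the per-connection limit makes. It is an illustration,
not the kernel code: the real check is tcp_oow_rate_limited() (visible in the
diff below), which works in jiffies, consults sysctl_tcp_invalid_ratelimit and
bumps an SNMP counter. All names in the sketch are invented for the example.

/*
 * Sketch: skip the dupack for an out-of-window packet if we already sent
 * one for this connection within the last 500 ms; otherwise send it and
 * remember when. Times are plain milliseconds instead of jiffies.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INVALID_RATELIMIT_MS 500	/* default sysctl_tcp_invalid_ratelimit */

struct conn {
	uint64_t last_oow_ack_ms;	/* 0 = no out-of-window ACK sent yet */
};

/* Return true if the dupack for this out-of-window packet must be skipped. */
static bool oow_rate_limited(struct conn *c, uint64_t now_ms)
{
	if (c->last_oow_ack_ms &&
	    now_ms - c->last_oow_ack_ms < INVALID_RATELIMIT_MS)
		return true;		/* too soon since the previous one */

	c->last_oow_ack_ms = now_ms;	/* allowed: remember when we sent it */
	return false;
}

int main(void)
{
	struct conn c = { 0 };
	/* Three out-of-window packets, arriving at 1000, 1100 and 1600 ms. */
	uint64_t arrival_ms[] = { 1000, 1100, 1600 };

	for (unsigned int i = 0; i < 3; i++)
		printf("t=%llu ms: %s\n",
		       (unsigned long long)arrival_ms[i],
		       oow_rate_limited(&c, arrival_ms[i]) ?
		       "dupack suppressed" : "dupack sent");
	return 0;
}

With the default 500 ms window, the dupack for the second packet (100 ms after
the first) is skipped, while the third packet (600 ms later) gets a dupack and
restarts the window.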
commit f2b2c582e8
parent a9b2c06dbe

3 changed files with 24 additions and 7 deletions
include/linux/tcp.h:

@@ -153,6 +153,7 @@ struct tcp_sock {
 	u32	snd_sml;	/* Last byte of the most recently transmitted small packet */
 	u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
 	u32	lsndtime;	/* timestamp of last sent data packet (for restart window) */
+	u32	last_oow_ack_time;  /* timestamp of last out-of-window ACK */
 
 	u32	tsoffset;	/* timestamp offset */
 
net/ipv4/tcp_input.c:

@@ -3322,13 +3322,22 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
 }
 
 /* RFC 5961 7 [ACK Throttling] */
-static void tcp_send_challenge_ack(struct sock *sk)
+static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
 {
 	/* unprotected vars, we dont care of overwrites */
 	static u32 challenge_timestamp;
 	static unsigned int challenge_count;
-	u32 now = jiffies / HZ;
+	struct tcp_sock *tp = tcp_sk(sk);
+	u32 now;
+
+	/* First check our per-socket dupack rate limit. */
+	if (tcp_oow_rate_limited(sock_net(sk), skb,
+				 LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
+				 &tp->last_oow_ack_time))
+		return;
 
+	/* Then check the check host-wide RFC 5961 rate limit. */
+	now = jiffies / HZ;
 	if (now != challenge_timestamp) {
 		challenge_timestamp = now;
 		challenge_count = 0;
@@ -3424,7 +3433,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	if (before(ack, prior_snd_una)) {
 		/* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
 		if (before(ack, prior_snd_una - tp->max_window)) {
-			tcp_send_challenge_ack(sk);
+			tcp_send_challenge_ack(sk, skb);
 			return -1;
 		}
 		goto old_ack;
@@ -4993,6 +5002,9 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 	    tcp_paws_discard(sk, skb)) {
 		if (!th->rst) {
 			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
-			tcp_send_dupack(sk, skb);
+			if (!tcp_oow_rate_limited(sock_net(sk), skb,
+						  LINUX_MIB_TCPACKSKIPPEDPAWS,
+						  &tp->last_oow_ack_time))
+				tcp_send_dupack(sk, skb);
 			goto discard;
 		}
@@ -5010,6 +5022,9 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 		if (!th->rst) {
 			if (th->syn)
 				goto syn_challenge;
-			tcp_send_dupack(sk, skb);
+			if (!tcp_oow_rate_limited(sock_net(sk), skb,
+						  LINUX_MIB_TCPACKSKIPPEDSEQ,
+						  &tp->last_oow_ack_time))
+				tcp_send_dupack(sk, skb);
 		}
 		goto discard;
@@ -5026,7 +5041,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 		if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt)
 			tcp_reset(sk);
 		else
-			tcp_send_challenge_ack(sk);
+			tcp_send_challenge_ack(sk, skb);
 		goto discard;
 	}
 
@@ -5040,7 +5055,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 		if (syn_inerr)
 			TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
-		tcp_send_challenge_ack(sk);
+		tcp_send_challenge_ack(sk, skb);
 		goto discard;
 	}
 
net/ipv4/tcp_minisocks.c:

@@ -467,6 +467,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 		tcp_enable_early_retrans(newtp);
 		newtp->tlp_high_seq = 0;
 		newtp->lsndtime = treq->snt_synack;
+		newtp->last_oow_ack_time = 0;
 		newtp->total_retrans = req->num_retrans;
 
 		/* So many TCP implementations out there (incorrectly) count the