tcp: md5: check md5 signature without socket lock

Since a8afca032 (tcp: md5: protects md5sig_info with RCU) tcp_md5_do_lookup
doesn't require the socket lock; rcu_read_lock is enough. Therefore the socket
lock is no longer required for tcp_v{4,6}_inbound_md5_hash either, so we can
move these calls (wrapped in rcu_read_{,un}lock) before bh_lock_sock:
from tcp_v{4,6}_do_rcv to tcp_v{4,6}_rcv.
Signed-off-by: Dmitry Popov <ixaphire@qrator.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
			
			
commit 9ea88a1530
parent 269f8cb260

2 changed files with 44 additions and 17 deletions
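For background on why rcu_read_lock is enough here: the per-socket MD5 key list is an RCU-protected structure, so readers only need a read-side critical section, while writers publish and retire entries through the usual RCU primitives. The fragment below is a minimal sketch of that pattern, not the actual tcp_md5sig_info code; md5_key_entry, md5_keys, md5_lookup, md5_key_present, md5_add and md5_remove are made-up names, and the addr/key fields are simplified placeholders.

#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Simplified stand-in for a per-socket MD5 key entry. */
struct md5_key_entry {
	struct hlist_node node;
	u32		  addr;		/* peer address the key is bound to */
	u8		  key[80];	/* raw key material */
	struct rcu_head	  rcu;
};

static HLIST_HEAD(md5_keys);		/* writers serialize among themselves */

/* Read side: caller must hold rcu_read_lock(); no socket lock needed. */
static struct md5_key_entry *md5_lookup(u32 addr)
{
	struct md5_key_entry *e;

	hlist_for_each_entry_rcu(e, &md5_keys, node)
		if (e->addr == addr)
			return e;
	return NULL;
}

/* Mirrors the shape of the patch: wrap the lookup in an RCU read section. */
static bool md5_key_present(u32 addr)
{
	bool ret;

	rcu_read_lock();
	ret = md5_lookup(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

/* Write side: publish a new entry; readers may see it immediately. */
static void md5_add(struct md5_key_entry *e)
{
	hlist_add_head_rcu(&e->node, &md5_keys);
}

/* Write side: unlink, then free only after a grace period has elapsed. */
static void md5_remove(struct md5_key_entry *e)
{
	hlist_del_rcu(&e->node);
	kfree_rcu(e, rcu);
}

Because the read side neither sleeps nor touches the socket lock, a check built on such a lookup can run before bh_lock_sock in tcp_v{4,6}_rcv, which is what the diff below does.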
				
			
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1167,7 +1167,8 @@ int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 }
 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
 
-static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
+				      const struct sk_buff *skb)
 {
 	/*
 	 * This gets called for each TCP segment that arrives
@@ -1220,6 +1221,17 @@ static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 	return false;
 }
 
+static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+{
+	bool ret;
+
+	rcu_read_lock();
+	ret = __tcp_v4_inbound_md5_hash(sk, skb);
+	rcu_read_unlock();
+
+	return ret;
+}
+
 #endif
 
 static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
@@ -1432,16 +1444,6 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	struct sock *rsk;
-#ifdef CONFIG_TCP_MD5SIG
-	/*
-	 * We really want to reject the packet as early as possible
-	 * if:
-	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
-	 *  o There is an MD5 option and we're not expecting one
-	 */
-	if (tcp_v4_inbound_md5_hash(sk, skb))
-		goto discard;
-#endif
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 		struct dst_entry *dst = sk->sk_rx_dst;
@@ -1644,6 +1646,18 @@ int tcp_v4_rcv(struct sk_buff *skb)
 
 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_and_relse;
+
+#ifdef CONFIG_TCP_MD5SIG
+	/*
+	 * We really want to reject the packet as early as possible
+	 * if:
+	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
+	 *  o There is an MD5 option and we're not expecting one
+	 */
+	if (tcp_v4_inbound_md5_hash(sk, skb))
+		goto discard_and_relse;
+#endif
+
 	nf_reset(skb);
 
 	if (sk_filter(sk, skb))
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -667,7 +667,8 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 	return 1;
 }
 
-static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+static int __tcp_v6_inbound_md5_hash(struct sock *sk,
+				     const struct sk_buff *skb)
 {
 	const __u8 *hash_location = NULL;
 	struct tcp_md5sig_key *hash_expected;
@@ -707,6 +708,18 @@ static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 	}
 	return 0;
 }
+
+static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+{
+	int ret;
+
+	rcu_read_lock();
+	ret = __tcp_v6_inbound_md5_hash(sk, skb);
+	rcu_read_unlock();
+
+	return ret;
+}
+
 #endif
 
 static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
@@ -1247,11 +1260,6 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	if (skb->protocol == htons(ETH_P_IP))
 		return tcp_v4_do_rcv(sk, skb);
 
-#ifdef CONFIG_TCP_MD5SIG
-	if (tcp_v6_inbound_md5_hash(sk, skb))
-		goto discard;
-#endif
-
 	if (sk_filter(sk, skb))
 		goto discard;
 
@@ -1424,6 +1432,11 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_and_relse;
 
+#ifdef CONFIG_TCP_MD5SIG
+	if (tcp_v6_inbound_md5_hash(sk, skb))
+		goto discard_and_relse;
+#endif
+
 	if (sk_filter(sk, skb))
 		goto discard_and_relse;
 
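The hunks above show the new call site after the XFRM policy check, but not its position relative to bh_lock_sock, which is the point of the change. Below is a hedged sketch of how the checks are ordered in the v4 receive path once the patch is applied, reconstructed from the diff plus the surrounding tcp_v4_rcv() code of that era. tcp_v4_rcv_checks_sketch() is a made-up helper (the real logic is inline in tcp_v4_rcv(), next to the static functions above, inside net/ipv4/tcp_ipv4.c); the socket lookup, timewait, prequeue and most error paths are omitted.

#include <net/tcp.h>

/* Sketch only: assumes the caller has already looked up sk, checked XFRM
 * policy and taken a reference, as tcp_v4_rcv() does before this point. */
static int tcp_v4_rcv_checks_sketch(struct sock *sk, struct sk_buff *skb)
{
	int ret = 0;

#ifdef CONFIG_TCP_MD5SIG
	/* Runs under rcu_read_lock() taken inside the helper; no socket lock. */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard;

	bh_lock_sock_nested(sk);		/* socket locked only after the cheap rejects */
	if (!sock_owned_by_user(sk))
		ret = tcp_v4_do_rcv(sk, skb);	/* no MD5 work left in here */
	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf + sk->sk_sndbuf))
		kfree_skb(skb);			/* backlog full; real code also counts the drop */
	bh_unlock_sock(sk);

	return ret;

discard:
	kfree_skb(skb);
	return 0;
}

The net effect is that segments failing the MD5 check are dropped in softirq context without the socket lock ever being taken.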