forked from mirrors/linux
		
	netfilter: keep conntrack reference until IPsecv6 policy checks are done
Keep the conntrack reference until policy checks have been performed for
IPsec V6 NAT support, just like ipv4.
The reference needs to be dropped before a packet is
queued, otherwise the held reference pins the conntrack module in memory and prevents it from being unloaded.
Fixes: 58a317f106 ("netfilter: ipv6: add IPv6 NAT support")
Signed-off-by: Madhu Koriginja <madhu.koriginja@nxp.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
			
			
This commit is contained in:
	parent: 36ce9982ef
	commit: b0e214d212
					 5 changed files with 13 additions and 11 deletions
				
			
		|  | @ -784,6 +784,7 @@ static int dccp_v6_rcv(struct sk_buff *skb) | ||||||
| 
 | 
 | ||||||
| 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) | 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) | ||||||
| 		goto discard_and_relse; | 		goto discard_and_relse; | ||||||
|  | 	nf_reset_ct(skb); | ||||||
| 
 | 
 | ||||||
| 	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, | 	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, | ||||||
| 				refcounted) ? -1 : 0; | 				refcounted) ? -1 : 0; | ||||||
|  |  | ||||||
|  | @ -404,10 +404,6 @@ void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr, | ||||||
| 			/* Only do this once for first final protocol */ | 			/* Only do this once for first final protocol */ | ||||||
| 			have_final = true; | 			have_final = true; | ||||||
| 
 | 
 | ||||||
| 			/* Free reference early: we don't need it any more,
 |  | ||||||
| 			   and it may hold ip_conntrack module loaded |  | ||||||
| 			   indefinitely. */ |  | ||||||
| 			nf_reset_ct(skb); |  | ||||||
| 
 | 
 | ||||||
| 			skb_postpull_rcsum(skb, skb_network_header(skb), | 			skb_postpull_rcsum(skb, skb_network_header(skb), | ||||||
| 					   skb_network_header_len(skb)); | 					   skb_network_header_len(skb)); | ||||||
|  | @ -430,10 +426,12 @@ void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr, | ||||||
| 				goto discard; | 				goto discard; | ||||||
| 			} | 			} | ||||||
| 		} | 		} | ||||||
| 		if (!(ipprot->flags & INET6_PROTO_NOPOLICY) && | 		if (!(ipprot->flags & INET6_PROTO_NOPOLICY)) { | ||||||
| 		    !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { | 			if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { | ||||||
| 			SKB_DR_SET(reason, XFRM_POLICY); | 				SKB_DR_SET(reason, XFRM_POLICY); | ||||||
| 			goto discard; | 				goto discard; | ||||||
|  | 			} | ||||||
|  | 			nf_reset_ct(skb); | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		ret = INDIRECT_CALL_2(ipprot->handler, tcp_v6_rcv, udpv6_rcv, | 		ret = INDIRECT_CALL_2(ipprot->handler, tcp_v6_rcv, udpv6_rcv, | ||||||
|  |  | ||||||
|  | @ -194,10 +194,8 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) | ||||||
| 			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); | 			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); | ||||||
| 
 | 
 | ||||||
| 			/* Not releasing hash table! */ | 			/* Not releasing hash table! */ | ||||||
| 			if (clone) { | 			if (clone) | ||||||
| 				nf_reset_ct(clone); |  | ||||||
| 				rawv6_rcv(sk, clone); | 				rawv6_rcv(sk, clone); | ||||||
| 			} |  | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 	rcu_read_unlock(); | 	rcu_read_unlock(); | ||||||
|  | @ -391,6 +389,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) | ||||||
| 		kfree_skb_reason(skb, SKB_DROP_REASON_XFRM_POLICY); | 		kfree_skb_reason(skb, SKB_DROP_REASON_XFRM_POLICY); | ||||||
| 		return NET_RX_DROP; | 		return NET_RX_DROP; | ||||||
| 	} | 	} | ||||||
|  | 	nf_reset_ct(skb); | ||||||
| 
 | 
 | ||||||
| 	if (!rp->checksum) | 	if (!rp->checksum) | ||||||
| 		skb->ip_summed = CHECKSUM_UNNECESSARY; | 		skb->ip_summed = CHECKSUM_UNNECESSARY; | ||||||
|  |  | ||||||
|  | @ -1723,6 +1723,8 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) | ||||||
| 	if (drop_reason) | 	if (drop_reason) | ||||||
| 		goto discard_and_relse; | 		goto discard_and_relse; | ||||||
| 
 | 
 | ||||||
|  | 	nf_reset_ct(skb); | ||||||
|  | 
 | ||||||
| 	if (tcp_filter(sk, skb)) { | 	if (tcp_filter(sk, skb)) { | ||||||
| 		drop_reason = SKB_DROP_REASON_SOCKET_FILTER; | 		drop_reason = SKB_DROP_REASON_SOCKET_FILTER; | ||||||
| 		goto discard_and_relse; | 		goto discard_and_relse; | ||||||
|  |  | ||||||
|  | @ -704,6 +704,7 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) | ||||||
| 		drop_reason = SKB_DROP_REASON_XFRM_POLICY; | 		drop_reason = SKB_DROP_REASON_XFRM_POLICY; | ||||||
| 		goto drop; | 		goto drop; | ||||||
| 	} | 	} | ||||||
|  | 	nf_reset_ct(skb); | ||||||
| 
 | 
 | ||||||
| 	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) { | 	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) { | ||||||
| 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); | 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); | ||||||
|  | @ -1027,6 +1028,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | ||||||
| 
 | 
 | ||||||
| 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) | 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) | ||||||
| 		goto discard; | 		goto discard; | ||||||
|  | 	nf_reset_ct(skb); | ||||||
| 
 | 
 | ||||||
| 	if (udp_lib_checksum_complete(skb)) | 	if (udp_lib_checksum_complete(skb)) | ||||||
| 		goto csum_error; | 		goto csum_error; | ||||||
|  |  | ||||||
		Loading…
	
		Reference in a new issue
	
	 Madhu Koriginja
						Madhu Koriginja