	tcp: don't annotate mark on control socket from tcp_v6_send_response()
Unlike IPv4, this control socket is shared by all CPUs, so we cannot use
it as a scratchpad area to annotate the mark that we pass to ip6_xmit().
Add a new parameter to ip6_xmit() to indicate the mark. The SCTP socket
family caches the flowi6 structure in the sctp_transport structure, so
we cannot use it to carry the mark unless we later reset it back, which
I discarded since it looks ugly to me.
Fixes: bf99b4ded5 ("tcp: fix mark propagation with fwmark_reflect enabled")
Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
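
For context only, a minimal user-space sketch of why the old scratchpad approach races (this is not kernel code; struct ctl_sock, xmit_via_scratchpad() and xmit_with_arg() are invented names for illustration): two threads share one "control socket" object, and stashing a per-packet mark in it before transmitting can let one thread's mark leak into the other thread's packet, whereas passing the mark as an explicit argument, which is what this patch does for ip6_xmit(), cannot race.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for the shared per-netns control socket. */
struct ctl_sock {
	volatile unsigned int sk_mark;	/* shared scratchpad -- the racy part */
};

static struct ctl_sock shared_ctl;	/* one instance used by every thread */

/* Old pattern: annotate the mark on the shared object, then "transmit". */
static unsigned int xmit_via_scratchpad(unsigned int mark)
{
	shared_ctl.sk_mark = mark;	/* another thread may overwrite this... */
	for (volatile int spin = 0; spin < 100; spin++)
		;			/* widen the race window for the demo */
	return shared_ctl.sk_mark;	/* ...before we read it back here */
}

/* New pattern: the mark travels as an explicit argument, no shared state. */
static unsigned int xmit_with_arg(unsigned int mark)
{
	return mark;
}

static void *worker(void *arg)
{
	unsigned int my_mark = (unsigned int)(unsigned long)arg;
	unsigned long racy_bad = 0, arg_bad = 0;

	for (int i = 0; i < 1000000; i++) {
		if (xmit_via_scratchpad(my_mark) != my_mark)
			racy_bad++;	/* a foreign mark leaked into our packet */
		if (xmit_with_arg(my_mark) != my_mark)
			arg_bad++;	/* never happens: no shared state */
	}
	printf("mark %u: scratchpad mismatches=%lu, argument mismatches=%lu\n",
	       my_mark, racy_bad, arg_bad);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, (void *)1UL);
	pthread_create(&b, NULL, worker, (void *)2UL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Built with cc -O2 -pthread, the scratchpad counter usually comes out non-zero while the argument counter stays at zero, loosely analogous to the fwmark corruption that the patch below avoids for reflected TCP resets and ACKs.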
			
			
parent a47b70ea86
commit 92e55f412c

6 changed files with 10 additions and 10 deletions
include/net/ipv6.h
@@ -871,7 +871,7 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
  *	upper-layer output functions
  */
 int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
-	     struct ipv6_txoptions *opt, int tclass);
+	     __u32 mark, struct ipv6_txoptions *opt, int tclass);
 
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
 

net/dccp/ipv6.c
@@ -227,7 +227,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
 		opt = ireq->ipv6_opt;
 		if (!opt)
 			opt = rcu_dereference(np->opt);
-		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
+		err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);
 		rcu_read_unlock();
 		err = net_xmit_eval(err);
 	}
@@ -281,7 +281,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
 	if (!IS_ERR(dst)) {
 		skb_dst_set(skb, dst);
-		ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
+		ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0);
 		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
 		DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
 		return;

net/ipv6/inet6_connection_sock.c
@@ -176,7 +176,7 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
 	/* Restore final destination back after routing done */
 	fl6.daddr = sk->sk_v6_daddr;
 
-	res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
+	res = ip6_xmit(sk, skb, &fl6, sk->sk_mark, rcu_dereference(np->opt),
 		       np->tclass);
 	rcu_read_unlock();
 	return res;

net/ipv6/ip6_output.c
@@ -172,7 +172,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
  * which are using proper atomic operations or spinlocks.
  */
 int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
-	     struct ipv6_txoptions *opt, int tclass)
+	     __u32 mark, struct ipv6_txoptions *opt, int tclass)
 {
 	struct net *net = sock_net(sk);
 	const struct ipv6_pinfo *np = inet6_sk(sk);
@@ -240,7 +240,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 
 	skb->protocol = htons(ETH_P_IPV6);
 	skb->priority = sk->sk_priority;
-	skb->mark = sk->sk_mark;
+	skb->mark = mark;
 
 	mtu = dst_mtu(dst);
 	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {

net/ipv6/tcp_ipv6.c
@@ -469,7 +469,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 		opt = ireq->ipv6_opt;
 		if (!opt)
 			opt = rcu_dereference(np->opt);
-		err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
+		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
 		rcu_read_unlock();
 		err = net_xmit_eval(err);
 	}
@@ -840,8 +840,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
 	if (!IS_ERR(dst)) {
 		skb_dst_set(buff, dst);
-		ctl_sk->sk_mark = fl6.flowi6_mark;
-		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
+		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
 		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 		if (rst)
 			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);

net/sctp/ipv6.c
@@ -222,7 +222,8 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
 	SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
 
 	rcu_read_lock();
-	res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass);
+	res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt),
+		       np->tclass);
 	rcu_read_unlock();
 	return res;
 }