Mirror of https://github.com/torvalds/linux.git

	tcp: tsq: move tsq_flags close to sk_wmem_alloc
tsq_flags being in the same cache line as sk_wmem_alloc makes a lot of sense: both fields are changed from tcp_wfree() and, more generally, by various TSQ-related functions. The prior patch made room in struct sock and added sk_tsq_flags; this patch deletes tsq_flags from struct tcp_sock.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9115e8cd2a
commit 7aa5470c2c

6 changed files with 17 additions and 20 deletions
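The point of the move is cache behavior, not semantics: the transmit-completion path dirties both the flags word and the write-memory counter, so co-locating them lets one cache line bounce between CPUs instead of two. A minimal user-space sketch of the idea, using an invented struct (this is not the kernel's actual struct sock layout):

	#include <stdatomic.h>

	struct fake_sock {
		/* ... fields that are mostly read ... */

		/* Both words below are written by the skb destructor on every
		 * TX completion (tcp_wfree() in the kernel), so placing them
		 * side by side keeps those writes within one cache line.
		 */
		atomic_long  wmem_alloc;	/* stands in for sk_wmem_alloc */
		atomic_ulong tsq_flags;		/* stands in for sk_tsq_flags  */
	};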
				
			
include/linux/tcp.h

@@ -186,7 +186,6 @@ struct tcp_sock {
 	u32	tsoffset;	/* timestamp offset */
 
 	struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
-	unsigned long	tsq_flags;
 
 	/* Data for direct copy to user */
 	struct {
			||||||
net/ipv4/tcp.c

@@ -663,9 +663,9 @@ static void tcp_push(struct sock *sk, int flags, int mss_now,
 	if (tcp_should_autocork(sk, skb, size_goal)) {
 
 		/* avoid atomic op if TSQ_THROTTLED bit is already set */
-		if (!test_bit(TSQ_THROTTLED, &tp->tsq_flags)) {
+		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
-			set_bit(TSQ_THROTTLED, &tp->tsq_flags);
+			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
 		}
 		/* It is possible TX completion already happened
 		 * before we set TSQ_THROTTLED.
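The tcp_push() hunk preserves a micro-optimization worth noting: test_bit() is a plain read, so the atomic set_bit() — and the exclusive cache-line ownership it forces — is skipped whenever the flag is already set. A rough C11 rendering of the same pattern, with invented names:

	#include <stdatomic.h>

	#define THROTTLED	(1UL << 0)	/* illustrative bit, not the kernel's */

	static void mark_throttled(atomic_ulong *flags)
	{
		/* plain load first: no atomic RMW if the bit is already set */
		if (!(atomic_load_explicit(flags, memory_order_relaxed) & THROTTLED))
			atomic_fetch_or(flags, THROTTLED);
	}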
net/ipv4/tcp_ipv4.c

@@ -443,7 +443,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		if (!sock_owned_by_user(sk)) {
 			tcp_v4_mtu_reduced(sk);
 		} else {
-			if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
+			if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
 				sock_hold(sk);
 		}
 		goto out;
net/ipv4/tcp_output.c

@@ -767,14 +767,15 @@ static void tcp_tasklet_func(unsigned long data)
 	list_for_each_safe(q, n, &list) {
 		tp = list_entry(q, struct tcp_sock, tsq_node);
 		list_del(&tp->tsq_node);
-		clear_bit(TSQ_QUEUED, &tp->tsq_flags);
 
 		sk = (struct sock *)tp;
+		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);
+
 		if (!sk->sk_lock.owned &&
-		    test_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags)) {
+		    test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) {
 			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk)) {
-				clear_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
+				clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags);
 				tcp_tsq_handler(sk);
 			}
 			bh_unlock_sock(sk);
@@ -797,16 +798,15 @@ static void tcp_tasklet_func(unsigned long data)
  */
 void tcp_release_cb(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned long flags, nflags;
 
 	/* perform an atomic operation only if at least one flag is set */
 	do {
-		flags = tp->tsq_flags;
+		flags = sk->sk_tsq_flags;
 		if (!(flags & TCP_DEFERRED_ALL))
 			return;
 		nflags = flags & ~TCP_DEFERRED_ALL;
-	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);
+	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);
 
 	if (flags & TCPF_TSQ_DEFERRED)
 		tcp_tsq_handler(sk);
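tcp_release_cb() shows the consumer side of the scheme: all deferred-work flags are claimed in one lockless step, and the atomic RMW is only issued when at least one flag is actually set. A user-space sketch of that cmpxchg() loop, with an invented mask value:

	#include <stdatomic.h>

	#define DEFERRED_ALL	0x0eUL	/* illustrative mask, not TCP_DEFERRED_ALL */

	static unsigned long claim_deferred(atomic_ulong *flags)
	{
		unsigned long old = atomic_load_explicit(flags, memory_order_relaxed);

		do {
			if (!(old & DEFERRED_ALL))
				return 0;	/* nothing pending: no RMW issued */
			/* on CAS failure, 'old' is reloaded with the current value */
		} while (!atomic_compare_exchange_weak(flags, &old,
						       old & ~DEFERRED_ALL));

		return old & DEFERRED_ALL;	/* bits this caller now owns */
	}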
@@ -878,7 +878,7 @@ void tcp_wfree(struct sk_buff *skb)
 	if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
 		goto out;
 
-	for (oval = READ_ONCE(tp->tsq_flags);; oval = nval) {
+	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
 		struct tsq_tasklet *tsq;
 		bool empty;
 
@@ -886,7 +886,7 @@ void tcp_wfree(struct sk_buff *skb)
 			goto out;
 
 		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED;
-		nval = cmpxchg(&tp->tsq_flags, oval, nval);
+		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
 		if (nval != oval)
 			continue;
 
@@ -2100,7 +2100,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 		    skb->prev == sk->sk_write_queue.next)
 			return false;
 
-		set_bit(TSQ_THROTTLED, &tcp_sk(sk)->tsq_flags);
+		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
 		/* It is possible TX completion already happened
 		 * before we set TSQ_THROTTLED, so we must
 		 * test again the condition.
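The comment in tcp_small_queue_check() describes a set-then-recheck ordering that avoids a lost wakeup: set TSQ_THROTTLED first, then re-read the memory counter, so that if TX completion drained the queue in between, at least one side observes the other's write. A sketch under invented names, relying on the default seq_cst ordering of C11 atomics:

	#include <stdatomic.h>
	#include <stdbool.h>

	#define THROTTLED	(1UL << 0)	/* illustrative bit */

	static bool should_throttle(atomic_ulong *flags, atomic_long *wmem, long limit)
	{
		if (atomic_load(wmem) <= limit)
			return false;			/* fast path: under the limit */

		atomic_fetch_or(flags, THROTTLED);	/* full barrier under seq_cst */
		/* completion may have run before the flag became visible: re-check */
		return atomic_load(wmem) > limit;
	}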
@@ -2241,8 +2241,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
 			break;
 
-		if (test_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags))
-			clear_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
+		if (test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
+			clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags);
 		if (tcp_small_queue_check(sk, skb, 0))
 			break;
 
@@ -3545,8 +3545,6 @@ void tcp_send_ack(struct sock *sk)
 	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
 	 * too much.
 	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
-	 * We also avoid tcp_wfree() overhead (cache line miss accessing
-	 * tp->tsq_flags) by using regular sock_wfree()
 	 */
 	skb_set_tcp_pure_ack(buff);
 
net/ipv4/tcp_timer.c

@@ -310,7 +310,7 @@ static void tcp_delack_timer(unsigned long data)
 		inet_csk(sk)->icsk_ack.blocked = 1;
 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 		/* deleguate our work to tcp_release_cb() */
-		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
+		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
 			sock_hold(sk);
 	}
 	bh_unlock_sock(sk);
@@ -592,7 +592,7 @@ static void tcp_write_timer(unsigned long data)
 		tcp_write_timer_handler(sk);
 	} else {
 		/* delegate our work to tcp_release_cb() */
-		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
+		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
 			sock_hold(sk);
 	}
 	bh_unlock_sock(sk);
net/ipv6/tcp_ipv6.c

@@ -399,7 +399,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		if (!sock_owned_by_user(sk))
 			tcp_v6_mtu_reduced(sk);
 		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
-					   &tp->tsq_flags))
+					   &sk->sk_tsq_flags))
 			sock_hold(sk);
 		goto out;
 	}