tcp: uninline tcp_prequeue()

tcp_prequeue() became too big to be inlined.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit b2fb4f54ec
parent f3564b2bb5

2 changed files with 45 additions and 44 deletions
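The patch follows the usual kernel pattern for uninlining: the static inline body in the header becomes a plain extern declaration, the definition moves into a single .c file, and an EXPORT_SYMBOL() keeps modular callers (the ipv6 receive path can be built as a module) linking against it. A minimal sketch of the pattern, using hypothetical names foo, foo.h and foo.c:

/* before, in foo.h: one copy inlined at every call site */
static inline int foo(int x)
{
	return x * x;	/* imagine a much larger body here */
}

/* after, in foo.h: callers compile against the prototype only */
extern int foo(int x);

/* after, in foo.c: a single out-of-line definition */
int foo(int x)
{
	return x * x;
}
EXPORT_SYMBOL(foo);	/* needed if modules call foo() */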
include/net/tcp.h

@@ -1030,50 +1030,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
 #endif
 }
 
-/* Packet is added to VJ-style prequeue for processing in process
- * context, if a reader task is waiting. Apparently, this exciting
- * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
- * failed somewhere. Latency? Burstiness? Well, at least now we will
- * see, why it failed. 8)8)				  --ANK
- *
- * NOTE: is this not too big to inline?
- */
-static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	if (sysctl_tcp_low_latency || !tp->ucopy.task)
-		return false;
-
-	if (skb->len <= tcp_hdrlen(skb) &&
-	    skb_queue_len(&tp->ucopy.prequeue) == 0)
-		return false;
-
-	__skb_queue_tail(&tp->ucopy.prequeue, skb);
-	tp->ucopy.memory += skb->truesize;
-	if (tp->ucopy.memory > sk->sk_rcvbuf) {
-		struct sk_buff *skb1;
-
-		BUG_ON(sock_owned_by_user(sk));
-
-		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
-			sk_backlog_rcv(sk, skb1);
-			NET_INC_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPPREQUEUEDROPPED);
-		}
-
-		tp->ucopy.memory = 0;
-	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
-		wake_up_interruptible_sync_poll(sk_sleep(sk),
-					   POLLIN | POLLRDNORM | POLLRDBAND);
-		if (!inet_csk_ack_scheduled(sk))
-			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-						  (3 * tcp_rto_min(sk)) / 4,
-						  TCP_RTO_MAX);
-	}
-	return true;
-}
-
+extern bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
 
 #undef STATE_TRACE
 
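Call sites are unchanged by this hunk; they still compile against the prototype above. For reference, the call-site shape in tcp_v4_rcv() (mirrored in the ipv6 receive path) is roughly the following sketch, with error handling and checksum/filter checks omitted:

bh_lock_sock_nested(sk);
ret = 0;
if (!sock_owned_by_user(sk)) {
	/* hand the skb to a waiting reader if possible,
	 * otherwise process it right away in softirq context */
	if (!tcp_prequeue(sk, skb))
		ret = tcp_v4_do_rcv(sk, skb);
}
bh_unlock_sock(sk);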
net/ipv4/tcp_ipv4.c

@@ -1950,6 +1950,50 @@ void tcp_v4_early_demux(struct sk_buff *skb)
 	}
 }
 
+/* Packet is added to VJ-style prequeue for processing in process
+ * context, if a reader task is waiting. Apparently, this exciting
+ * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
+ * failed somewhere. Latency? Burstiness? Well, at least now we will
+ * see, why it failed. 8)8)				  --ANK
+ *
+ */
+bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (sysctl_tcp_low_latency || !tp->ucopy.task)
+		return false;
+
+	if (skb->len <= tcp_hdrlen(skb) &&
+	    skb_queue_len(&tp->ucopy.prequeue) == 0)
+		return false;
+
+	__skb_queue_tail(&tp->ucopy.prequeue, skb);
+	tp->ucopy.memory += skb->truesize;
+	if (tp->ucopy.memory > sk->sk_rcvbuf) {
+		struct sk_buff *skb1;
+
+		BUG_ON(sock_owned_by_user(sk));
+
+		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
+			sk_backlog_rcv(sk, skb1);
+			NET_INC_STATS_BH(sock_net(sk),
+					 LINUX_MIB_TCPPREQUEUEDROPPED);
+		}
+
+		tp->ucopy.memory = 0;
+	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
+		wake_up_interruptible_sync_poll(sk_sleep(sk),
+					   POLLIN | POLLRDNORM | POLLRDBAND);
+		if (!inet_csk_ack_scheduled(sk))
+			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+						  (3 * tcp_rto_min(sk)) / 4,
+						  TCP_RTO_MAX);
+	}
+	return true;
+}
+EXPORT_SYMBOL(tcp_prequeue);
+
 /*
  *	From tcp_input.c
  */
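For context on the "processing in process context" comment: the skbs queued here are drained by the reading task from recvmsg. Roughly as in tcp_prequeue_process() in net/ipv4/tcp.c of the same era (a sketch, not the verbatim function):

static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	/* run the regular receive path on each queued skb,
	 * with BHs disabled as that path expects */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* reset the accounting that tcp_prequeue() built up */
	tp->ucopy.memory = 0;
}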