	tcp: annotate sk->sk_rcvbuf lockless reads
For the sake of tcp_poll(), there are a few places where we fetch
sk->sk_rcvbuf while this field can change from IRQ context or another
CPU. We need to add READ_ONCE() annotations, and also make sure the
write sides use the corresponding WRITE_ONCE() to avoid store-tearing.

Note that other transports probably need similar fixes.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d9b55bf7b6
commit ebb3b78db7

7 changed files with 15 additions and 12 deletions
			
include/net/tcp.h

@@ -1380,14 +1380,14 @@ static inline int tcp_win_from_space(const struct sock *sk, int space)
 /* Note: caller must be prepared to deal with negative returns */
 static inline int tcp_space(const struct sock *sk)
 {
-	return tcp_win_from_space(sk, sk->sk_rcvbuf -
+	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
 				  READ_ONCE(sk->sk_backlog.len) -
 				  atomic_read(&sk->sk_rmem_alloc));
 }
 
 static inline int tcp_full_space(const struct sock *sk)
 {
-	return tcp_win_from_space(sk, sk->sk_rcvbuf);
+	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
 }
 
 extern void tcp_openreq_init_rwin(struct request_sock *req,
include/trace/events/sock.h

@@ -82,7 +82,7 @@ TRACE_EVENT(sock_rcvqueue_full,
 	TP_fast_assign(
 		__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
 		__entry->truesize   = skb->truesize;
-		__entry->sk_rcvbuf  = sk->sk_rcvbuf;
+		__entry->sk_rcvbuf  = READ_ONCE(sk->sk_rcvbuf);
 	),
 
 	TP_printk("rmem_alloc=%d truesize=%u sk_rcvbuf=%d",
net/core/filter.c

@@ -4252,7 +4252,8 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
 		case SO_RCVBUF:
 			val = min_t(u32, val, sysctl_rmem_max);
 			sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
-			sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
+			WRITE_ONCE(sk->sk_rcvbuf,
+				   max_t(int, val * 2, SOCK_MIN_RCVBUF));
 			break;
 		case SO_SNDBUF:
 			val = min_t(u32, val, sysctl_wmem_max);
net/core/skbuff.c

@@ -4415,7 +4415,7 @@ static void skb_set_err_queue(struct sk_buff *skb)
 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 {
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned int)sk->sk_rcvbuf)
+	    (unsigned int)READ_ONCE(sk->sk_rcvbuf))
 		return -ENOMEM;
 
 	skb_orphan(skb);
net/core/sock.c

@@ -831,7 +831,8 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 		 * returning the value we actually used in getsockopt
 		 * is the most desirable behavior.
 		 */
-		sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
+		WRITE_ONCE(sk->sk_rcvbuf,
+			   max_t(int, val * 2, SOCK_MIN_RCVBUF));
 		break;
 
 	case SO_RCVBUFFORCE:
@@ -3204,7 +3205,7 @@ void sk_get_meminfo(const struct sock *sk, u32 *mem)
 	memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);
 
 	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
-	mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
+	mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
 	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
 	mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
 	mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
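An aside on the SO_RCVBUF paths annotated above: the kernel first caps the
requested value at net.core.rmem_max and then stores double that amount (the
max_t(int, val * 2, SOCK_MIN_RCVBUF) in both hunks) to leave room for
bookkeeping overhead, and getsockopt() reports the doubled value. A small
userspace check of that behavior:

#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int val = 65536, out = 0;
	socklen_t len = sizeof(out);

	if (fd < 0)
		return 1;
	/* The kernel stores max(val * 2, SOCK_MIN_RCVBUF), with the
	 * request capped at net.core.rmem_max first, as seen in the
	 * sock_setsockopt() hunk above.
	 */
	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
	printf("requested %d, kernel stored %d\n", val, out);
	return 0;
}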
net/ipv4/tcp.c

@@ -451,7 +451,7 @@ void tcp_init_sock(struct sock *sk)
 	icsk->icsk_sync_mss = tcp_sync_mss;
 
 	sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
-	sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
+	WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
 
 	sk_sockets_allocated_inc(sk);
 	sk->sk_route_forced_caps = NETIF_F_GSO;
@@ -1711,7 +1711,7 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
 
 	val <<= 1;
 	if (val > sk->sk_rcvbuf) {
-		sk->sk_rcvbuf = val;
+		WRITE_ONCE(sk->sk_rcvbuf, val);
 		tcp_sk(sk)->window_clamp = tcp_win_from_space(sk, val);
 	}
 	return 0;
net/ipv4/tcp_input.c

@@ -483,8 +483,9 @@ static void tcp_clamp_window(struct sock *sk)
 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
 	    !tcp_under_memory_pressure(sk) &&
 	    sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
-		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
-				    net->ipv4.sysctl_tcp_rmem[2]);
+		WRITE_ONCE(sk->sk_rcvbuf,
+			   min(atomic_read(&sk->sk_rmem_alloc),
+			       net->ipv4.sysctl_tcp_rmem[2]));
 	}
 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
 		tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
@@ -648,7 +649,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
 		rcvbuf = min_t(u64, rcvwin * rcvmem,
 			       sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
 		if (rcvbuf > sk->sk_rcvbuf) {
-			sk->sk_rcvbuf = rcvbuf;
+			WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
 
 			/* Make the window clamp follow along.  */
 			tp->window_clamp = tcp_win_from_space(sk, rcvbuf);
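Taken together, the include/net/tcp.h hunks make the space computation behind
tcp_poll() safe to run locklessly: every shared field is now loaded exactly
once. The standalone model below shows that computation; struct sock_lite,
tcp_space_lite() and win_from_space() are illustrative stand-ins for the
kernel's types, and tcp_win_from_space() is modeled with
sysctl_tcp_adv_win_scale assumed to be 1 (its default), i.e. half the free
byte space is advertisable as window.

#include <stdio.h>

#define READ_ONCE(x)	(*(const volatile typeof(x) *)&(x))

struct sock_lite {
	int sk_rcvbuf;		/* written elsewhere via WRITE_ONCE()      */
	int sk_backlog_len;	/* bytes queued to the socket backlog      */
	int sk_rmem_alloc;	/* bytes already charged to the recv queue */
};

/* Models tcp_win_from_space() with tcp_adv_win_scale = 1:
 * window = space - space/2.
 */
static int win_from_space(int space)
{
	return space - (space >> 1);
}

/* Lockless tcp_space() analogue: each shared field is loaded once. */
static int tcp_space_lite(const struct sock_lite *sk)
{
	return win_from_space(READ_ONCE(sk->sk_rcvbuf) -
			      READ_ONCE(sk->sk_backlog_len) -
			      READ_ONCE(sk->sk_rmem_alloc));
}

int main(void)
{
	struct sock_lite sk = {
		.sk_rcvbuf = 212992, .sk_backlog_len = 0,
		.sk_rmem_alloc = 65536,
	};

	/* 212992 - 0 - 65536 = 147456 bytes free; half is advertisable. */
	printf("advertisable space: %d bytes\n", tcp_space_lite(&sk));
	return 0;
}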