net: convert sock.sk_wmem_alloc from atomic_t to refcount_t

The refcount_t type and its corresponding API should be used instead of atomic_t when the variable is used as a reference counter. This avoids accidental refcounter overflows that might lead to use-after-free situations.

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 14afee4b60
parent 2638595afc

37 changed files with 74 additions and 85 deletions
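The conversion in the diff below is mechanical: atomic_set()/atomic_read()/atomic_add()/atomic_inc()/atomic_dec_and_test() on sk_wmem_alloc become refcount_set()/refcount_read()/refcount_add()/refcount_inc()/refcount_dec_and_test(), and bare atomic_sub() call sites become WARN_ON(refcount_sub_and_test(...)) because the refcount API offers no unchecked subtraction. For readers who have not used refcount_t, here is a minimal userspace sketch of the semantics the patch relies on; the toy_refcount_* names and the saturation details are illustrative assumptions for this example, not the kernel implementation.

/* Illustrative sketch only -- not kernel code.  It models a reference
 * counter that saturates instead of wrapping, which is the property that
 * makes overflow-driven use-after-free much harder to trigger. */
#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct { atomic_uint val; } toy_refcount_t;

#define TOY_REFCOUNT_SATURATED UINT_MAX

static void toy_refcount_set(toy_refcount_t *r, unsigned int n)
{
	atomic_store(&r->val, n);
}

static unsigned int toy_refcount_read(toy_refcount_t *r)
{
	return atomic_load(&r->val);
}

/* Add i, but saturate (and complain) instead of wrapping past UINT_MAX
 * or resurrecting a counter that already reached zero. */
static void toy_refcount_add(unsigned int i, toy_refcount_t *r)
{
	unsigned int old = atomic_fetch_add(&r->val, i);

	if (old == 0 || old + i < old) {
		atomic_store(&r->val, TOY_REFCOUNT_SATURATED);
		fprintf(stderr, "refcount misuse detected, saturating\n");
	}
}

/* Subtract i and report whether the counter reached zero. */
static bool toy_refcount_sub_and_test(unsigned int i, toy_refcount_t *r)
{
	return atomic_fetch_sub(&r->val, i) == i;
}

int main(void)
{
	toy_refcount_t wmem;

	toy_refcount_set(&wmem, 1);		/* bias, as in sk_alloc()/vcc_create() */
	toy_refcount_add(512, &wmem);		/* charge one queued skb's truesize */
	printf("charged: %u\n", toy_refcount_read(&wmem));

	if (toy_refcount_sub_and_test(512, &wmem))	/* TX completion uncharges */
		puts("bug: uncharge consumed the initial reference");
	if (toy_refcount_sub_and_test(1, &wmem))	/* final put, like sk_free() */
		puts("counter hit zero, object may be freed");
	return 0;
}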
@@ -924,12 +924,7 @@ fore200e_tx_irq(struct fore200e* fore200e)
 		else {
 		    dev_kfree_skb_any(entry->skb);
 		}
-#if 1
-		/* race fixed by the above incarnation mechanism, but... */
-		if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
-		    atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
-		}
-#endif
+
 		/* check error condition */
 		if (*entry->status & STATUS_ERROR)
 		    atomic_inc(&vcc->stats->tx_err);
@@ -1130,13 +1125,9 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
 	return -ENOMEM;
     }

-    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
-
     vcc->push(vcc, skb);
     atomic_inc(&vcc->stats->rx);

-    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
-
     return 0;
 }

@@ -1572,7 +1563,6 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
     unsigned long           flags;

     ASSERT(vcc);
-    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
     ASSERT(fore200e);
     ASSERT(fore200e_vcc);

@@ -2395,7 +2395,7 @@ he_close(struct atm_vcc *vcc)
 		 * TBRQ, the host issues the close command to the adapter.
 		 */

-		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
+		while (((tx_inuse = refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
 		       (retry < MAX_RETRY)) {
 			msleep(sleep);
 			if (sleep < 250)

@@ -724,7 +724,7 @@ push_on_scq(struct idt77252_dev *card, struct vc_map *vc, struct sk_buff *skb)
 		struct sock *sk = sk_atm(vcc);

 		vc->estimator->cells += (skb->len + 47) / 48;
-		if (atomic_read(&sk->sk_wmem_alloc) >
+		if (refcount_read(&sk->sk_wmem_alloc) >
 		    (sk->sk_sndbuf >> 1)) {
 			u32 cps = vc->estimator->maxcps;

@@ -2009,7 +2009,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
 		atomic_inc(&vcc->stats->tx_err);
 		return -ENOMEM;
 	}
-	atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);

 	skb_put_data(skb, cell, 52);

@@ -254,7 +254,7 @@ static inline void atm_return(struct atm_vcc *vcc,int truesize)

 static inline int atm_may_send(struct atm_vcc *vcc,unsigned int size)
 {
-	return (size + atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) <
+	return (size + refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) <
 	       sk_atm(vcc)->sk_sndbuf;
 }

@@ -390,7 +390,7 @@ struct sock {

 	/* ===== cache line for TX ===== */
 	int			sk_wmem_queued;
-	atomic_t		sk_wmem_alloc;
+	refcount_t		sk_wmem_alloc;
 	unsigned long		sk_tsq_flags;
 	struct sk_buff		*sk_send_head;
 	struct sk_buff_head	sk_write_queue;
@@ -1911,7 +1911,7 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *fro
  */
 static inline int sk_wmem_alloc_get(const struct sock *sk)
 {
-	return atomic_read(&sk->sk_wmem_alloc) - 1;
+	return refcount_read(&sk->sk_wmem_alloc) - 1;
 }

 /**
@@ -2055,7 +2055,7 @@ static inline unsigned long sock_wspace(struct sock *sk)
 	int amt = 0;

 	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
-		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
+		amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
 		if (amt < 0)
 			amt = 0;
 	}
@@ -2136,7 +2136,7 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
  */
 static inline bool sock_writeable(const struct sock *sk)
 {
-	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
+	return refcount_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
 }

 static inline gfp_t gfp_any(void)

@@ -252,7 +252,7 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,

 	ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
 	pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
-	atomic_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
 	ATM_SKB(skb)->atm_options = atmvcc->atm_options;
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += skb->len;

@@ -381,7 +381,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
 		memcpy(here, llc_oui, sizeof(llc_oui));
 		((__be16 *) here)[3] = skb->protocol;
 	}
-	atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
 	ATM_SKB(skb)->atm_options = vcc->atm_options;
 	entry->vccs->last_use = jiffies;
 	pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);

@@ -80,9 +80,9 @@ static void vcc_sock_destruct(struct sock *sk)
 		printk(KERN_DEBUG "%s: rmem leakage (%d bytes) detected.\n",
 		       __func__, atomic_read(&sk->sk_rmem_alloc));

-	if (atomic_read(&sk->sk_wmem_alloc))
+	if (refcount_read(&sk->sk_wmem_alloc))
 		printk(KERN_DEBUG "%s: wmem leakage (%d bytes) detected.\n",
-		       __func__, atomic_read(&sk->sk_wmem_alloc));
+		       __func__, refcount_read(&sk->sk_wmem_alloc));
 }

 static void vcc_def_wakeup(struct sock *sk)
@@ -101,7 +101,7 @@ static inline int vcc_writable(struct sock *sk)
 	struct atm_vcc *vcc = atm_sk(sk);

 	return (vcc->qos.txtp.max_sdu +
-		atomic_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf;
+		refcount_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf;
 }

 static void vcc_write_space(struct sock *sk)
@@ -156,7 +156,7 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family, i
 	memset(&vcc->local, 0, sizeof(struct sockaddr_atmsvc));
 	memset(&vcc->remote, 0, sizeof(struct sockaddr_atmsvc));
 	vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */
-	atomic_set(&sk->sk_wmem_alloc, 1);
+	refcount_set(&sk->sk_wmem_alloc, 1);
 	atomic_set(&sk->sk_rmem_alloc, 0);
 	vcc->push = NULL;
 	vcc->pop = NULL;
@@ -630,7 +630,7 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
 		goto out;
 	}
 	pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
-	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk->sk_wmem_alloc);

 	skb->dev = NULL; /* for paths shared with net_device interfaces */
 	ATM_SKB(skb)->atm_options = vcc->atm_options;

@@ -181,7 +181,7 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
 	ATM_SKB(skb)->vcc = vcc;
 	ATM_SKB(skb)->atm_options = vcc->atm_options;

-	atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
 	if (vcc->send(vcc, skb) < 0) {
 		dev->stats.tx_dropped++;
 		return;
@@ -345,7 +345,7 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
 	int i;
 	char *tmp;		/* FIXME */

-	atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+	WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
 	mesg = (struct atmlec_msg *)skb->data;
 	tmp = skb->data;
 	tmp += sizeof(struct atmlec_msg);

@@ -555,7 +555,7 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
 					sizeof(struct llc_snap_hdr));
 	}

-	atomic_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
 	ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
 	entry->shortcut->send(entry->shortcut, skb);
 	entry->packets_fwded++;
@@ -911,7 +911,7 @@ static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb)

 	struct mpoa_client *mpc = find_mpc_by_vcc(vcc);
 	struct k_message *mesg = (struct k_message *)skb->data;
-	atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+	WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));

 	if (mpc == NULL) {
 		pr_info("no mpc found\n");

@@ -350,7 +350,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
 		return 1;
 	}

-	atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
 	ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
 	pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
 		 skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);

@@ -35,7 +35,7 @@ static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)

 	pr_debug("(%d) %d -= %d\n",
 		 vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
-	atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+	WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
 	dev_kfree_skb_any(skb);
 	sk->sk_write_space(sk);
 }

@@ -67,7 +67,7 @@ static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
 	struct sock *sk;

 	msg = (struct atmsvc_msg *) skb->data;
-	atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+	WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
 	vcc = *(struct atm_vcc **) &msg->vcc;
 	pr_debug("%d (0x%lx)\n", (int)msg->type, (unsigned long)vcc);
 	sk = sk_atm(vcc);

@@ -1013,7 +1013,7 @@ static const struct proto_ops caif_stream_ops = {
 static void caif_sock_destructor(struct sock *sk)
 {
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
-	caif_assert(!atomic_read(&sk->sk_wmem_alloc));
+	caif_assert(!refcount_read(&sk->sk_wmem_alloc));
 	caif_assert(sk_unhashed(sk));
 	caif_assert(!sk->sk_socket);
 	if (!sock_flag(sk, SOCK_DEAD)) {

@@ -614,7 +614,7 @@ int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
 		skb->data_len += copied;
 		skb->len += copied;
 		skb->truesize += truesize;
-		atomic_add(truesize, &skb->sk->sk_wmem_alloc);
+		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
 		while (copied) {
 			int size = min_t(int, copied, PAGE_SIZE - start);
 			skb_fill_page_desc(skb, frag++, pages[n], start, size);

@@ -3024,7 +3024,7 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 		get_page(pfrag->page);

 		skb->truesize += copy;
-		atomic_add(copy, &sk->sk_wmem_alloc);
+		refcount_add(copy, &sk->sk_wmem_alloc);
 		skb->len += copy;
 		skb->data_len += copy;
 		offset += copy;

@@ -1528,7 +1528,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
 		if (likely(sk->sk_net_refcnt))
 			get_net(net);
 		sock_net_set(sk, net);
-		atomic_set(&sk->sk_wmem_alloc, 1);
+		refcount_set(&sk->sk_wmem_alloc, 1);

 		mem_cgroup_sk_alloc(sk);
 		cgroup_sk_alloc(&sk->sk_cgrp_data);
@@ -1552,7 +1552,7 @@ static void __sk_destruct(struct rcu_head *head)
 		sk->sk_destruct(sk);

 	filter = rcu_dereference_check(sk->sk_filter,
-				       atomic_read(&sk->sk_wmem_alloc) == 0);
+				       refcount_read(&sk->sk_wmem_alloc) == 0);
 	if (filter) {
 		sk_filter_uncharge(sk, filter);
 		RCU_INIT_POINTER(sk->sk_filter, NULL);
@@ -1602,7 +1602,7 @@ void sk_free(struct sock *sk)
 	 * some packets are still in some tx queue.
 	 * If not null, sock_wfree() will call __sk_free(sk) later
 	 */
-	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
+	if (refcount_dec_and_test(&sk->sk_wmem_alloc))
 		__sk_free(sk);
 }
 EXPORT_SYMBOL(sk_free);
@@ -1659,7 +1659,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		/*
 		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
 		 */
-		atomic_set(&newsk->sk_wmem_alloc, 1);
+		refcount_set(&newsk->sk_wmem_alloc, 1);
 		atomic_set(&newsk->sk_omem_alloc, 0);
 		sk_init_common(newsk);

@@ -1787,7 +1787,7 @@ void sock_wfree(struct sk_buff *skb)
 		 * Keep a reference on sk_wmem_alloc, this will be released
 		 * after sk_write_space() call
 		 */
-		atomic_sub(len - 1, &sk->sk_wmem_alloc);
+		WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
 		sk->sk_write_space(sk);
 		len = 1;
 	}
@@ -1795,7 +1795,7 @@ void sock_wfree(struct sk_buff *skb)
 	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
 	 * could not do because of in-flight packets
 	 */
-	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
+	if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
 		__sk_free(sk);
 }
 EXPORT_SYMBOL(sock_wfree);
@@ -1807,7 +1807,7 @@ void __sock_wfree(struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;

-	if (atomic_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
+	if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
 		__sk_free(sk);
 }

@@ -1829,7 +1829,7 @@ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
 	 * is enough to guarantee sk_free() wont free this sock until
 	 * all in-flight packets are completed
 	 */
-	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
 }
 EXPORT_SYMBOL(skb_set_owner_w);

@@ -1852,7 +1852,7 @@ void skb_orphan_partial(struct sk_buff *skb)
 		struct sock *sk = skb->sk;

 		if (atomic_inc_not_zero(&sk->sk_refcnt)) {
-			atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+			WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
 			skb->destructor = sock_efree;
 		}
 	} else {
@@ -1912,7 +1912,7 @@ EXPORT_SYMBOL(sock_i_ino);
 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
 			     gfp_t priority)
 {
-	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+	if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
 		struct sk_buff *skb = alloc_skb(size, priority);
 		if (skb) {
 			skb_set_owner_w(skb, sk);
@@ -1987,7 +1987,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
 			break;
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
-		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
+		if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
 			break;
 		if (sk->sk_shutdown & SEND_SHUTDOWN)
 			break;
@@ -2310,7 +2310,7 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
 		if (sk->sk_type == SOCK_STREAM) {
 			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
 				return 1;
-		} else if (atomic_read(&sk->sk_wmem_alloc) <
+		} else if (refcount_read(&sk->sk_wmem_alloc) <
 			   prot->sysctl_wmem[0])
 				return 1;
 	}
@@ -2577,7 +2577,7 @@ static void sock_def_write_space(struct sock *sk)
 	/* Do not wake up a writer until he can make "significant"
 	 * progress.  --DaveM
 	 */
-	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
+	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
 		wq = rcu_dereference(sk->sk_wq);
 		if (skwq_has_sleeper(wq))
 			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |

@@ -150,7 +150,7 @@ void inet_sock_destruct(struct sock *sk)
 	}

 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
-	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
 	WARN_ON(sk->sk_wmem_queued);
 	WARN_ON(sk->sk_forward_alloc);

@@ -307,7 +307,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
 			skb->data_len += tailen;
 			skb->truesize += tailen;
 			if (sk)
-				atomic_add(tailen, &sk->sk_wmem_alloc);
+				refcount_add(tailen, &sk->sk_wmem_alloc);

 			goto out;
 		}

@@ -1037,7 +1037,7 @@ static int __ip_append_data(struct sock *sk,
 						(flags & MSG_DONTWAIT), &err);
 			} else {
 				skb = NULL;
-				if (atomic_read(&sk->sk_wmem_alloc) <=
+				if (refcount_read(&sk->sk_wmem_alloc) <=
 				    2 * sk->sk_sndbuf)
 					skb = sock_wmalloc(sk,
 							   alloclen + hh_len + 15, 1,
@@ -1145,7 +1145,7 @@ static int __ip_append_data(struct sock *sk,
 			skb->len += copy;
 			skb->data_len += copy;
 			skb->truesize += copy;
-			atomic_add(copy, &sk->sk_wmem_alloc);
+			refcount_add(copy, &sk->sk_wmem_alloc);
 		}
 		offset += copy;
 		length -= copy;
@@ -1369,7 +1369,7 @@ ssize_t	ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
 		skb->len += len;
 		skb->data_len += len;
 		skb->truesize += len;
-		atomic_add(len, &sk->sk_wmem_alloc);
+		refcount_add(len, &sk->sk_wmem_alloc);
 		offset += len;
 		size -= len;
 	}

@@ -664,7 +664,7 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
 	return skb->len < size_goal &&
 	       sysctl_tcp_autocorking &&
 	       skb != tcp_write_queue_head(sk) &&
-	       atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
+	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
 }

 static void tcp_push(struct sock *sk, int flags, int mss_now,
@@ -692,7 +692,7 @@ static void tcp_push(struct sock *sk, int flags, int mss_now,
 		/* It is possible TX completion already happened
 		 * before we set TSQ_THROTTLED.
 		 */
-		if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize)
+		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
 			return;
 	}

@@ -152,7 +152,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 		swap(gso_skb->sk, skb->sk);
 		swap(gso_skb->destructor, skb->destructor);
 		sum_truesize += skb->truesize;
-		atomic_add(sum_truesize - gso_skb->truesize,
+		refcount_add(sum_truesize - gso_skb->truesize,
 			   &skb->sk->sk_wmem_alloc);
 	}

@@ -861,12 +861,11 @@ void tcp_wfree(struct sk_buff *skb)
 	struct sock *sk = skb->sk;
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned long flags, nval, oval;
-	int wmem;

 	/* Keep one reference on sk_wmem_alloc.
 	 * Will be released by sk_free() from here or tcp_tasklet_func()
 	 */
-	wmem = atomic_sub_return(skb->truesize - 1, &sk->sk_wmem_alloc);
+	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));

 	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
 	 * Wait until our queues (qdisc + devices) are drained.
@@ -875,7 +874,7 @@ void tcp_wfree(struct sk_buff *skb)
 	 * - chance for incoming ACK (processed by another cpu maybe)
 	 *   to migrate this flow (skb->ooo_okay will be eventually set)
 	 */
-	if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
+	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
 		goto out;

 	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
@@ -925,7 +924,7 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
 		if (nval != oval)
 			continue;

-		if (!atomic_inc_not_zero(&sk->sk_wmem_alloc))
+		if (!refcount_inc_not_zero(&sk->sk_wmem_alloc))
 			break;
 		/* queue this socket to tasklet queue */
 		tsq = this_cpu_ptr(&tsq_tasklet);
@@ -1045,7 +1044,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	skb->sk = sk;
 	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
 	skb_set_hash_from_sk(skb, sk);
-	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk->sk_wmem_alloc);

 	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);

@@ -2176,7 +2175,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 	limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes);
 	limit <<= factor;

-	if (atomic_read(&sk->sk_wmem_alloc) > limit) {
+	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
 		/* Always send the 1st or 2nd skb in write queue.
 		 * No need to wait for TX completion to call us back,
 		 * after softirq/tasklet schedule.
@@ -2192,7 +2191,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 		 * test again the condition.
 		 */
 		smp_mb__after_atomic();
-		if (atomic_read(&sk->sk_wmem_alloc) > limit)
+		if (refcount_read(&sk->sk_wmem_alloc) > limit)
 			return true;
 	}
 	return false;
@@ -2812,7 +2811,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 	/* Do not sent more than we queued. 1/4 is reserved for possible
 	 * copying overhead: fragmentation, tunneling, mangling etc.
 	 */
-	if (atomic_read(&sk->sk_wmem_alloc) >
+	if (refcount_read(&sk->sk_wmem_alloc) >
 	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
 		  sk->sk_sndbuf))
 		return -EAGAIN;

@@ -275,7 +275,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
 			skb->data_len += tailen;
 			skb->truesize += tailen;
 			if (sk)
-				atomic_add(tailen, &sk->sk_wmem_alloc);
+				refcount_add(tailen, &sk->sk_wmem_alloc);

 			goto out;
 		}

@@ -1472,7 +1472,7 @@ static int __ip6_append_data(struct sock *sk,
 						(flags & MSG_DONTWAIT), &err);
 			} else {
 				skb = NULL;
-				if (atomic_read(&sk->sk_wmem_alloc) <=
+				if (refcount_read(&sk->sk_wmem_alloc) <=
 				    2 * sk->sk_sndbuf)
 					skb = sock_wmalloc(sk,
 							   alloclen + hh_len, 1,
@@ -1581,7 +1581,7 @@ static int __ip6_append_data(struct sock *sk,
 			skb->len += copy;
 			skb->data_len += copy;
 			skb->truesize += copy;
-			atomic_add(copy, &sk->sk_wmem_alloc);
+			refcount_add(copy, &sk->sk_wmem_alloc);
 		}
 		offset += copy;
 		length -= copy;

@@ -162,7 +162,7 @@ static void kcm_format_psock(struct kcm_psock *psock, struct seq_file *seq,
 		   psock->sk->sk_receive_queue.qlen,
 		   atomic_read(&psock->sk->sk_rmem_alloc),
 		   psock->sk->sk_write_queue.qlen,
-		   atomic_read(&psock->sk->sk_wmem_alloc));
+		   refcount_read(&psock->sk->sk_wmem_alloc));

 	if (psock->done)
 		seq_puts(seq, "Done ");

@@ -109,7 +109,7 @@ static void pfkey_sock_destruct(struct sock *sk)
 	}

 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
-	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(refcount_read(&sk->sk_wmem_alloc));

 	atomic_dec(&net_pfkey->socks_nr);
 }

@@ -372,7 +372,7 @@ static void netlink_sock_destruct(struct sock *sk)
 	}

 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
-	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
 	WARN_ON(nlk_sk(sk)->groups);
 }

@@ -1317,7 +1317,7 @@ static void packet_sock_destruct(struct sock *sk)
 	skb_queue_purge(&sk->sk_error_queue);

 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
-	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(refcount_read(&sk->sk_wmem_alloc));

 	if (!sock_flag(sk, SOCK_DEAD)) {
 		pr_err("Attempt to release alive packet socket: %p\n", sk);
@@ -2523,7 +2523,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
 	skb->data_len = to_write;
 	skb->len += to_write;
 	skb->truesize += to_write;
-	atomic_add(to_write, &po->sk.sk_wmem_alloc);
+	refcount_add(to_write, &po->sk.sk_wmem_alloc);

 	while (likely(to_write)) {
 		nr_frags = skb_shinfo(skb)->nr_frags;

@@ -360,7 +360,7 @@ static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
 		return POLLHUP;

 	if (sk->sk_state == TCP_ESTABLISHED &&
-		atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
+		refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
 		atomic_read(&pn->tx_credits))
 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

@@ -202,7 +202,7 @@ void rds_tcp_write_space(struct sock *sk)
 	tc->t_last_seen_una = rds_tcp_snd_una(tc);
 	rds_send_path_drop_acked(cp, rds_tcp_snd_una(tc), rds_tcp_is_acked);

-	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
+	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
 		queue_delayed_work(rds_wq, &cp->cp_send_w, 0);

 out:

@@ -53,7 +53,7 @@ static void rxrpc_sock_destructor(struct sock *);
  */
 static inline int rxrpc_writable(struct sock *sk)
 {
-	return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
+	return refcount_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
 }

 /*
@@ -730,7 +730,7 @@ static void rxrpc_sock_destructor(struct sock *sk)

 	rxrpc_purge_queue(&sk->sk_receive_queue);

-	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
 	WARN_ON(!sk_unhashed(sk));
 	WARN_ON(sk->sk_socket);

@@ -498,7 +498,7 @@ static void sch_atm_dequeue(unsigned long data)
 			ATM_SKB(skb)->vcc = flow->vcc;
 			memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
 			       flow->hdr_len);
-			atomic_add(skb->truesize,
+			refcount_add(skb->truesize,
 				   &sk_atm(flow->vcc)->sk_wmem_alloc);
 			/* atm.atm_options are already set by atm_tc_enqueue */
 			flow->vcc->send(flow->vcc, skb);

@@ -402,7 +402,7 @@ static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
 	 * therefore only reserve a single byte to keep socket around until
 	 * the packet has been transmitted.
 	 */
-	atomic_inc(&sk->sk_wmem_alloc);
+	refcount_inc(&sk->sk_wmem_alloc);
 }

 static int sctp_packet_pack(struct sctp_packet *packet,

@@ -363,7 +363,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
 		assoc->stream.outcnt, assoc->max_retrans,
 		assoc->init_retries, assoc->shutdown_retries,
 		assoc->rtx_data_chunks,
-		atomic_read(&sk->sk_wmem_alloc),
+		refcount_read(&sk->sk_wmem_alloc),
 		sk->sk_wmem_queued,
 		sk->sk_sndbuf,
 		sk->sk_rcvbuf);

@@ -164,7 +164,7 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
 				sizeof(struct sk_buff) +
 				sizeof(struct sctp_chunk);

-	atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
+	refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
 	sk->sk_wmem_queued += chunk->skb->truesize;
 	sk_mem_charge(sk, chunk->skb->truesize);
 }
@@ -7684,7 +7684,7 @@ static void sctp_wfree(struct sk_buff *skb)
 				sizeof(struct sk_buff) +
 				sizeof(struct sctp_chunk);

-	atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
+	WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc));

 	/*
 	 * This undoes what is done via sctp_set_owner_w and sk_mem_charge

@@ -442,7 +442,7 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
 static int unix_writable(const struct sock *sk)
 {
 	return sk->sk_state != TCP_LISTEN &&
-	       (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
+	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
 }

 static void unix_write_space(struct sock *sk)
@@ -487,7 +487,7 @@ static void unix_sock_destructor(struct sock *sk)

 	skb_queue_purge(&sk->sk_receive_queue);

-	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
 	WARN_ON(!sk_unhashed(sk));
 	WARN_ON(sk->sk_socket);
 	if (!sock_flag(sk, SOCK_DEAD)) {
@@ -2033,7 +2033,7 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
 	skb->len += size;
 	skb->data_len += size;
 	skb->truesize += size;
-	atomic_add(size, &sk->sk_wmem_alloc);
+	refcount_add(size, &sk->sk_wmem_alloc);

 	if (newskb) {
 		err = unix_scm_to_skb(&scm, skb, false);
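A note on the ownership protocol visible in the hunks above: sk_wmem_alloc is created holding one reference (sk_alloc(), vcc_create(), sk_clone_lock()), every skb queued for transmit adds its truesize (skb_set_owner_w(), tcp_transmit_skb()), and sk_free() only drops that initial reference, so the socket survives until the last in-flight skb is uncharged by sock_wfree()/__sock_wfree(). Because of that +1 bias, a correct partial uncharge should never reach zero, which is why plain atomic_sub() call sites become WARN_ON(refcount_sub_and_test(...)). The sketch below models that lifetime with plain C11 atomics; struct toy_sock and the helper names are invented for illustration and are not kernel APIs.

#include <stdatomic.h>
#include <stdio.h>

struct toy_sock {
	atomic_uint wmem_alloc;		/* stands in for sk->sk_wmem_alloc */
};

static void toy_destroy(struct toy_sock *sk)
{
	printf("socket %p fully released\n", (void *)sk);
}

/* Modeled on sk_free(): drop only the initial reference; if packets are
 * still queued, the last uncharge performs the real free. */
static void toy_sk_free(struct toy_sock *sk)
{
	if (atomic_fetch_sub(&sk->wmem_alloc, 1) == 1)
		toy_destroy(sk);
}

/* Modeled on __sock_wfree(): give back a transmitted skb's truesize. */
static void toy_sock_wfree(struct toy_sock *sk, unsigned int truesize)
{
	unsigned int old = atomic_fetch_sub(&sk->wmem_alloc, truesize);

	if (old == truesize)		/* counter reached zero: last reference */
		toy_destroy(sk);
	else if (old < truesize)	/* underflow: the bug refcount_t would flag */
		fprintf(stderr, "sk_wmem_alloc underflow detected\n");
}

int main(void)
{
	struct toy_sock sk;

	atomic_init(&sk.wmem_alloc, 1);		/* +1 bias from sk_alloc() */
	atomic_fetch_add(&sk.wmem_alloc, 256);	/* skb_set_owner_w(): charge skb */
	toy_sk_free(&sk);			/* socket closed, skb still in flight */
	toy_sock_wfree(&sk, 256);		/* TX completion frees the socket */
	return 0;
}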