mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 02:30:34 +02:00 
			
		
		
		
	netlink: Call cb->done from a worker thread
The cb->done interface expects to be called in process context.
This was broken by the netlink RCU conversion.  This patch fixes
it by adding a worker struct to make the cb->done call where
necessary.
Fixes: 21e4902aea ("netlink: Lockless lookup with RCU grace period in socket release")
Reported-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
			
			
This commit is contained in:
		
							parent
							
								
									95c2027bfe
								
							
						
					
					
						commit
						707693c8a4
					
				
					 2 changed files with 25 additions and 4 deletions
				
			
		| 
						 | 
				
			
			@ -322,14 +322,11 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 | 
			
		|||
	sk_mem_charge(sk, skb->truesize);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static void netlink_sock_destruct(struct sock *sk)
 | 
			
		||||
static void __netlink_sock_destruct(struct sock *sk)
 | 
			
		||||
{
 | 
			
		||||
	struct netlink_sock *nlk = nlk_sk(sk);
 | 
			
		||||
 | 
			
		||||
	if (nlk->cb_running) {
 | 
			
		||||
		if (nlk->cb.done)
 | 
			
		||||
			nlk->cb.done(&nlk->cb);
 | 
			
		||||
 | 
			
		||||
		module_put(nlk->cb.module);
 | 
			
		||||
		kfree_skb(nlk->cb.skb);
 | 
			
		||||
	}
 | 
			
		||||
| 
						 | 
				
			
			@ -346,6 +343,28 @@ static void netlink_sock_destruct(struct sock *sk)
 | 
			
		|||
	WARN_ON(nlk_sk(sk)->groups);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static void netlink_sock_destruct_work(struct work_struct *work)
 | 
			
		||||
{
 | 
			
		||||
	struct netlink_sock *nlk = container_of(work, struct netlink_sock,
 | 
			
		||||
						work);
 | 
			
		||||
 | 
			
		||||
	nlk->cb.done(&nlk->cb);
 | 
			
		||||
	__netlink_sock_destruct(&nlk->sk);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static void netlink_sock_destruct(struct sock *sk)
 | 
			
		||||
{
 | 
			
		||||
	struct netlink_sock *nlk = nlk_sk(sk);
 | 
			
		||||
 | 
			
		||||
	if (nlk->cb_running && nlk->cb.done) {
 | 
			
		||||
		INIT_WORK(&nlk->work, netlink_sock_destruct_work);
 | 
			
		||||
		schedule_work(&nlk->work);
 | 
			
		||||
		return;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	__netlink_sock_destruct(sk);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 | 
			
		||||
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 | 
			
		||||
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -3,6 +3,7 @@
 | 
			
		|||
 | 
			
		||||
#include <linux/rhashtable.h>
 | 
			
		||||
#include <linux/atomic.h>
 | 
			
		||||
#include <linux/workqueue.h>
 | 
			
		||||
#include <net/sock.h>
 | 
			
		||||
 | 
			
		||||
#define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
 | 
			
		||||
| 
						 | 
				
			
			@ -33,6 +34,7 @@ struct netlink_sock {
 | 
			
		|||
 | 
			
		||||
	struct rhash_head	node;
 | 
			
		||||
	struct rcu_head		rcu;
 | 
			
		||||
	struct work_struct	work;
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
static inline struct netlink_sock *nlk_sk(struct sock *sk)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue