forked from mirrors/linux
		
	netns: use a spin_lock to protect nsid management
Before this patch, nsids were protected by the rtnl lock. The goal of this patch is to be able to find a nsid without needing to hold the rtnl lock. The next patch will introduce a netlink socket option to listen to all netns that have a nsid assigned in the netns where the socket is opened. Thus, it's important to call rtnl_net_notifyid() outside the spinlock, to avoid a recursive lock (nsids are notified via rtnl). This was the main reason for the previous patch. Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
		
							parent
							
								
									3138dbf881
								
							
						
					
					
						commit
						95f38411df
					
				
					 1 changed files with 44 additions and 13 deletions
				
			
		|  | @ -28,6 +28,7 @@ | ||||||
| static LIST_HEAD(pernet_list); | static LIST_HEAD(pernet_list); | ||||||
| static struct list_head *first_device = &pernet_list; | static struct list_head *first_device = &pernet_list; | ||||||
| DEFINE_MUTEX(net_mutex); | DEFINE_MUTEX(net_mutex); | ||||||
|  | static DEFINE_SPINLOCK(nsid_lock); | ||||||
| 
 | 
 | ||||||
| LIST_HEAD(net_namespace_list); | LIST_HEAD(net_namespace_list); | ||||||
| EXPORT_SYMBOL_GPL(net_namespace_list); | EXPORT_SYMBOL_GPL(net_namespace_list); | ||||||
|  | @ -147,18 +148,17 @@ static void ops_free_list(const struct pernet_operations *ops, | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | /* should be called with nsid_lock held */ | ||||||
| static int alloc_netid(struct net *net, struct net *peer, int reqid) | static int alloc_netid(struct net *net, struct net *peer, int reqid) | ||||||
| { | { | ||||||
| 	int min = 0, max = 0; | 	int min = 0, max = 0; | ||||||
| 
 | 
 | ||||||
| 	ASSERT_RTNL(); |  | ||||||
| 
 |  | ||||||
| 	if (reqid >= 0) { | 	if (reqid >= 0) { | ||||||
| 		min = reqid; | 		min = reqid; | ||||||
| 		max = reqid + 1; | 		max = reqid + 1; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	return idr_alloc(&net->netns_ids, peer, min, max, GFP_KERNEL); | 	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /* This function is used by idr_for_each(). If net is equal to peer, the
 | /* This function is used by idr_for_each(). If net is equal to peer, the
 | ||||||
|  | @ -174,13 +174,15 @@ static int net_eq_idr(int id, void *net, void *peer) | ||||||
| 	return 0; | 	return 0; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | /* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 | ||||||
|  |  * is set to true, thus the caller knows that the new id must be notified via | ||||||
|  |  * rtnl. | ||||||
|  |  */ | ||||||
| static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc) | static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc) | ||||||
| { | { | ||||||
| 	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer); | 	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer); | ||||||
| 	bool alloc_it = *alloc; | 	bool alloc_it = *alloc; | ||||||
| 
 | 
 | ||||||
| 	ASSERT_RTNL(); |  | ||||||
| 
 |  | ||||||
| 	*alloc = false; | 	*alloc = false; | ||||||
| 
 | 
 | ||||||
| 	/* Magic value for id 0. */ | 	/* Magic value for id 0. */ | ||||||
|  | @ -198,6 +200,7 @@ static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc) | ||||||
| 	return NETNSA_NSID_NOT_ASSIGNED; | 	return NETNSA_NSID_NOT_ASSIGNED; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | /* should be called with nsid_lock held */ | ||||||
| static int __peernet2id(struct net *net, struct net *peer) | static int __peernet2id(struct net *net, struct net *peer) | ||||||
| { | { | ||||||
| 	bool no = false; | 	bool no = false; | ||||||
|  | @ -211,27 +214,46 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id); | ||||||
|  */ |  */ | ||||||
| int peernet2id_alloc(struct net *net, struct net *peer) | int peernet2id_alloc(struct net *net, struct net *peer) | ||||||
| { | { | ||||||
| 	bool alloc = atomic_read(&peer->count) == 0 ? false : true; | 	unsigned long flags; | ||||||
|  | 	bool alloc; | ||||||
| 	int id; | 	int id; | ||||||
| 
 | 
 | ||||||
|  | 	spin_lock_irqsave(&nsid_lock, flags); | ||||||
|  | 	alloc = atomic_read(&peer->count) == 0 ? false : true; | ||||||
| 	id = __peernet2id_alloc(net, peer, &alloc); | 	id = __peernet2id_alloc(net, peer, &alloc); | ||||||
|  | 	spin_unlock_irqrestore(&nsid_lock, flags); | ||||||
| 	if (alloc && id >= 0) | 	if (alloc && id >= 0) | ||||||
| 		rtnl_net_notifyid(net, RTM_NEWNSID, id); | 		rtnl_net_notifyid(net, RTM_NEWNSID, id); | ||||||
| 	return id; | 	return id; | ||||||
| } | } | ||||||
| EXPORT_SYMBOL(peernet2id_alloc); | EXPORT_SYMBOL(peernet2id_alloc); | ||||||
| 
 | 
 | ||||||
|  | /* This function returns, if assigned, the id of a peer netns. */ | ||||||
|  | static int peernet2id(struct net *net, struct net *peer) | ||||||
|  | { | ||||||
|  | 	unsigned long flags; | ||||||
|  | 	int id; | ||||||
|  | 
 | ||||||
|  | 	spin_lock_irqsave(&nsid_lock, flags); | ||||||
|  | 	id = __peernet2id(net, peer); | ||||||
|  | 	spin_unlock_irqrestore(&nsid_lock, flags); | ||||||
|  | 	return id; | ||||||
|  | } | ||||||
|  | 
 | ||||||
| struct net *get_net_ns_by_id(struct net *net, int id) | struct net *get_net_ns_by_id(struct net *net, int id) | ||||||
| { | { | ||||||
|  | 	unsigned long flags; | ||||||
| 	struct net *peer; | 	struct net *peer; | ||||||
| 
 | 
 | ||||||
| 	if (id < 0) | 	if (id < 0) | ||||||
| 		return NULL; | 		return NULL; | ||||||
| 
 | 
 | ||||||
| 	rcu_read_lock(); | 	rcu_read_lock(); | ||||||
|  | 	spin_lock_irqsave(&nsid_lock, flags); | ||||||
| 	peer = idr_find(&net->netns_ids, id); | 	peer = idr_find(&net->netns_ids, id); | ||||||
| 	if (peer) | 	if (peer) | ||||||
| 		get_net(peer); | 		get_net(peer); | ||||||
|  | 	spin_unlock_irqrestore(&nsid_lock, flags); | ||||||
| 	rcu_read_unlock(); | 	rcu_read_unlock(); | ||||||
| 
 | 
 | ||||||
| 	return peer; | 	return peer; | ||||||
|  | @ -372,14 +394,19 @@ static void cleanup_net(struct work_struct *work) | ||||||
| 		list_del_rcu(&net->list); | 		list_del_rcu(&net->list); | ||||||
| 		list_add_tail(&net->exit_list, &net_exit_list); | 		list_add_tail(&net->exit_list, &net_exit_list); | ||||||
| 		for_each_net(tmp) { | 		for_each_net(tmp) { | ||||||
| 			int id = __peernet2id(tmp, net); | 			int id; | ||||||
| 
 | 
 | ||||||
| 			if (id >= 0) { | 			spin_lock_irq(&nsid_lock); | ||||||
| 				rtnl_net_notifyid(tmp, RTM_DELNSID, id); | 			id = __peernet2id(tmp, net); | ||||||
|  | 			if (id >= 0) | ||||||
| 				idr_remove(&tmp->netns_ids, id); | 				idr_remove(&tmp->netns_ids, id); | ||||||
|  | 			spin_unlock_irq(&nsid_lock); | ||||||
|  | 			if (id >= 0) | ||||||
|  | 				rtnl_net_notifyid(tmp, RTM_DELNSID, id); | ||||||
| 		} | 		} | ||||||
| 		} | 		spin_lock_irq(&nsid_lock); | ||||||
| 		idr_destroy(&net->netns_ids); | 		idr_destroy(&net->netns_ids); | ||||||
|  | 		spin_unlock_irq(&nsid_lock); | ||||||
| 
 | 
 | ||||||
| 	} | 	} | ||||||
| 	rtnl_unlock(); | 	rtnl_unlock(); | ||||||
|  | @ -507,6 +534,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh) | ||||||
| { | { | ||||||
| 	struct net *net = sock_net(skb->sk); | 	struct net *net = sock_net(skb->sk); | ||||||
| 	struct nlattr *tb[NETNSA_MAX + 1]; | 	struct nlattr *tb[NETNSA_MAX + 1]; | ||||||
|  | 	unsigned long flags; | ||||||
| 	struct net *peer; | 	struct net *peer; | ||||||
| 	int nsid, err; | 	int nsid, err; | ||||||
| 
 | 
 | ||||||
|  | @ -527,12 +555,14 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh) | ||||||
| 	if (IS_ERR(peer)) | 	if (IS_ERR(peer)) | ||||||
| 		return PTR_ERR(peer); | 		return PTR_ERR(peer); | ||||||
| 
 | 
 | ||||||
|  | 	spin_lock_irqsave(&nsid_lock, flags); | ||||||
| 	if (__peernet2id(net, peer) >= 0) { | 	if (__peernet2id(net, peer) >= 0) { | ||||||
| 		err = -EEXIST; | 		err = -EEXIST; | ||||||
| 		goto out; | 		goto out; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	err = alloc_netid(net, peer, nsid); | 	err = alloc_netid(net, peer, nsid); | ||||||
|  | 	spin_unlock_irqrestore(&nsid_lock, flags); | ||||||
| 	if (err >= 0) { | 	if (err >= 0) { | ||||||
| 		rtnl_net_notifyid(net, RTM_NEWNSID, err); | 		rtnl_net_notifyid(net, RTM_NEWNSID, err); | ||||||
| 		err = 0; | 		err = 0; | ||||||
|  | @ -601,7 +631,7 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh) | ||||||
| 		goto out; | 		goto out; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	id = __peernet2id(net, peer); | 	id = peernet2id(net, peer); | ||||||
| 	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, | 	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, | ||||||
| 			    RTM_GETNSID, net, id); | 			    RTM_GETNSID, net, id); | ||||||
| 	if (err < 0) | 	if (err < 0) | ||||||
|  | @ -654,10 +684,11 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb) | ||||||
| 		.idx = 0, | 		.idx = 0, | ||||||
| 		.s_idx = cb->args[0], | 		.s_idx = cb->args[0], | ||||||
| 	}; | 	}; | ||||||
|  | 	unsigned long flags; | ||||||
| 
 | 
 | ||||||
| 	ASSERT_RTNL(); | 	spin_lock_irqsave(&nsid_lock, flags); | ||||||
| 
 |  | ||||||
| 	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb); | 	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb); | ||||||
|  | 	spin_unlock_irqrestore(&nsid_lock, flags); | ||||||
| 
 | 
 | ||||||
| 	cb->args[0] = net_cb.idx; | 	cb->args[0] = net_cb.idx; | ||||||
| 	return skb->len; | 	return skb->len; | ||||||
|  |  | ||||||
		Loading…
	
		Reference in a new issue
	
	 Nicolas Dichtel
						Nicolas Dichtel