forked from mirrors/linux
		
	tipc: fix a deadlock when flushing scheduled work
In commit fdeba99b1e ("tipc: fix use-after-free in tipc_bcast_get_mode"), we try to make sure the tipc_net_finalize_work work item has finished if it was enqueued. But calling flush_scheduled_work() does not affect only the above work item; it also flushes any other scheduled work. This has turned out to be overkill and caused a deadlock, as syzbot reported: ====================================================== WARNING: possible circular locking dependency detected 5.9.0-rc2-next-20200828-syzkaller #0 Not tainted ------------------------------------------------------ kworker/u4:6/349 is trying to acquire lock: ffff8880aa063d38 ((wq_completion)events){+.+.}-{0:0}, at: flush_workqueue+0xe1/0x13e0 kernel/workqueue.c:2777 but task is already holding lock: ffffffff8a879430 (pernet_ops_rwsem){++++}-{3:3}, at: cleanup_net+0x9b/0xb10 net/core/net_namespace.c:565 [...] Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(pernet_ops_rwsem); lock(&sb->s_type->i_mutex_key#13); lock(pernet_ops_rwsem); lock((wq_completion)events); *** DEADLOCK *** [...] v1: To fix the original issue, we replace the above call by introducing a bit flag. When a namespace is cleaned up, the bit flag is set to zero and: - the tipc_net_finalize function just returns immediately. - tipc_net_finalize_work is not enqueued into the scheduled work queue. v2: Use the cancel_work_sync() helper to make sure ONLY tipc_net_finalize_work() is stopped before releasing the bcbase object. Reported-by: syzbot+d5aa7e0385f6a5d0f4fd@syzkaller.appspotmail.com Fixes: fdeba99b1e ("tipc: fix use-after-free in tipc_bcast_get_mode") Acked-by: Jon Maloy <jmaloy@redhat.com> Signed-off-by: Hoang Huu Le <hoang.h.le@dektech.com.au> Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
		
							parent
							
								
									02a20d4fef
								
							
						
					
					
						commit
						d966ddcc38
					
				
					 4 changed files with 19 additions and 19 deletions
				
			
		|  | @ -60,6 +60,7 @@ static int __net_init tipc_init_net(struct net *net) | ||||||
| 	tn->trial_addr = 0; | 	tn->trial_addr = 0; | ||||||
| 	tn->addr_trial_end = 0; | 	tn->addr_trial_end = 0; | ||||||
| 	tn->capabilities = TIPC_NODE_CAPABILITIES; | 	tn->capabilities = TIPC_NODE_CAPABILITIES; | ||||||
|  | 	INIT_WORK(&tn->final_work.work, tipc_net_finalize_work); | ||||||
| 	memset(tn->node_id, 0, sizeof(tn->node_id)); | 	memset(tn->node_id, 0, sizeof(tn->node_id)); | ||||||
| 	memset(tn->node_id_string, 0, sizeof(tn->node_id_string)); | 	memset(tn->node_id_string, 0, sizeof(tn->node_id_string)); | ||||||
| 	tn->mon_threshold = TIPC_DEF_MON_THRESHOLD; | 	tn->mon_threshold = TIPC_DEF_MON_THRESHOLD; | ||||||
|  | @ -107,13 +108,13 @@ static int __net_init tipc_init_net(struct net *net) | ||||||
| 
 | 
 | ||||||
| static void __net_exit tipc_exit_net(struct net *net) | static void __net_exit tipc_exit_net(struct net *net) | ||||||
| { | { | ||||||
|  | 	struct tipc_net *tn = tipc_net(net); | ||||||
|  | 
 | ||||||
| 	tipc_detach_loopback(net); | 	tipc_detach_loopback(net); | ||||||
|  | 	/* Make sure the tipc_net_finalize_work() finished */ | ||||||
|  | 	cancel_work_sync(&tn->final_work.work); | ||||||
| 	tipc_net_stop(net); | 	tipc_net_stop(net); | ||||||
| 
 | 
 | ||||||
| 	/* Make sure the tipc_net_finalize_work stopped
 |  | ||||||
| 	 * before releasing the resources. |  | ||||||
| 	 */ |  | ||||||
| 	flush_scheduled_work(); |  | ||||||
| 	tipc_bcast_stop(net); | 	tipc_bcast_stop(net); | ||||||
| 	tipc_nametbl_stop(net); | 	tipc_nametbl_stop(net); | ||||||
| 	tipc_sk_rht_destroy(net); | 	tipc_sk_rht_destroy(net); | ||||||
|  |  | ||||||
|  | @ -90,6 +90,12 @@ extern unsigned int tipc_net_id __read_mostly; | ||||||
| extern int sysctl_tipc_rmem[3] __read_mostly; | extern int sysctl_tipc_rmem[3] __read_mostly; | ||||||
| extern int sysctl_tipc_named_timeout __read_mostly; | extern int sysctl_tipc_named_timeout __read_mostly; | ||||||
| 
 | 
 | ||||||
|  | struct tipc_net_work { | ||||||
|  | 	struct work_struct work; | ||||||
|  | 	struct net *net; | ||||||
|  | 	u32 addr; | ||||||
|  | }; | ||||||
|  | 
 | ||||||
| struct tipc_net { | struct tipc_net { | ||||||
| 	u8  node_id[NODE_ID_LEN]; | 	u8  node_id[NODE_ID_LEN]; | ||||||
| 	u32 node_addr; | 	u32 node_addr; | ||||||
|  | @ -143,6 +149,8 @@ struct tipc_net { | ||||||
| 	/* TX crypto handler */ | 	/* TX crypto handler */ | ||||||
| 	struct tipc_crypto *crypto_tx; | 	struct tipc_crypto *crypto_tx; | ||||||
| #endif | #endif | ||||||
|  | 	/* Work item for net finalize */ | ||||||
|  | 	struct tipc_net_work final_work; | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| static inline struct tipc_net *tipc_net(struct net *net) | static inline struct tipc_net *tipc_net(struct net *net) | ||||||
|  |  | ||||||
|  | @ -105,12 +105,6 @@ | ||||||
|  *     - A local spin_lock protecting the queue of subscriber events. |  *     - A local spin_lock protecting the queue of subscriber events. | ||||||
| */ | */ | ||||||
| 
 | 
 | ||||||
| struct tipc_net_work { |  | ||||||
| 	struct work_struct work; |  | ||||||
| 	struct net *net; |  | ||||||
| 	u32 addr; |  | ||||||
| }; |  | ||||||
| 
 |  | ||||||
| static void tipc_net_finalize(struct net *net, u32 addr); | static void tipc_net_finalize(struct net *net, u32 addr); | ||||||
| 
 | 
 | ||||||
| int tipc_net_init(struct net *net, u8 *node_id, u32 addr) | int tipc_net_init(struct net *net, u8 *node_id, u32 addr) | ||||||
|  | @ -142,25 +136,21 @@ static void tipc_net_finalize(struct net *net, u32 addr) | ||||||
| 			     TIPC_CLUSTER_SCOPE, 0, addr); | 			     TIPC_CLUSTER_SCOPE, 0, addr); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static void tipc_net_finalize_work(struct work_struct *work) | void tipc_net_finalize_work(struct work_struct *work) | ||||||
| { | { | ||||||
| 	struct tipc_net_work *fwork; | 	struct tipc_net_work *fwork; | ||||||
| 
 | 
 | ||||||
| 	fwork = container_of(work, struct tipc_net_work, work); | 	fwork = container_of(work, struct tipc_net_work, work); | ||||||
| 	tipc_net_finalize(fwork->net, fwork->addr); | 	tipc_net_finalize(fwork->net, fwork->addr); | ||||||
| 	kfree(fwork); |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| void tipc_sched_net_finalize(struct net *net, u32 addr) | void tipc_sched_net_finalize(struct net *net, u32 addr) | ||||||
| { | { | ||||||
| 	struct tipc_net_work *fwork = kzalloc(sizeof(*fwork), GFP_ATOMIC); | 	struct tipc_net *tn = tipc_net(net); | ||||||
| 
 | 
 | ||||||
| 	if (!fwork) | 	tn->final_work.net = net; | ||||||
| 		return; | 	tn->final_work.addr = addr; | ||||||
| 	INIT_WORK(&fwork->work, tipc_net_finalize_work); | 	schedule_work(&tn->final_work.work); | ||||||
| 	fwork->net = net; |  | ||||||
| 	fwork->addr = addr; |  | ||||||
| 	schedule_work(&fwork->work); |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| void tipc_net_stop(struct net *net) | void tipc_net_stop(struct net *net) | ||||||
|  |  | ||||||
|  | @ -42,6 +42,7 @@ | ||||||
| extern const struct nla_policy tipc_nl_net_policy[]; | extern const struct nla_policy tipc_nl_net_policy[]; | ||||||
| 
 | 
 | ||||||
| int tipc_net_init(struct net *net, u8 *node_id, u32 addr); | int tipc_net_init(struct net *net, u8 *node_id, u32 addr); | ||||||
|  | void tipc_net_finalize_work(struct work_struct *work); | ||||||
| void tipc_sched_net_finalize(struct net *net, u32 addr); | void tipc_sched_net_finalize(struct net *net, u32 addr); | ||||||
| void tipc_net_stop(struct net *net); | void tipc_net_stop(struct net *net); | ||||||
| int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); | int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); | ||||||
|  |  | ||||||
		Loading…
	
		Reference in a new issue
	
	 Hoang Huu Le
						Hoang Huu Le