forked from mirrors/linux
		
	net_sched: remove tcf_block_put_deferred()
In commit 7aa0045dad ("net_sched: introduce a workqueue for RCU callbacks of tc filter") I deferred tcf_chain_flush() to a workqueue; this causes a use-after-free because the qdisc is already destroyed by the time the queued work runs.

tcf_block_put_deferred() is no longer necessary now that we take the RTNL lock for each tc filter destroy work; nothing else can jump in at that point. The same goes for tcf_chain_hold(): we are fully serialized now.

This also removes one level of indirection and therefore makes the code more readable. Note that this brings back one rcu_barrier(); however, compared to the code prior to commit 7aa0045dad we still removed one rcu_barrier(). For net-next, we can consider refcounting the tcf block to avoid it.

Fixes: 7aa0045dad ("net_sched: introduce a workqueue for RCU callbacks of tc filter")
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Jiri Pirko <jiri@resnulli.us>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Eric Dumazet <edumazet@google.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:

parent f9e56baf03
commit 822e86d997

1 changed file with 8 additions and 29 deletions
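For orientation before reading the diff: below is a sketch of the two-stage deferral this patch removes, assembled only from the lines the diff deletes (context the diff does not show is marked with a placeholder comment). tcf_block_put() queued tcf_block_put_deferred(), and the actual tcf_chain_flush() only ran once that work item executed, by which point the qdisc owning the block could already be destroyed; that is the use-after-free described in the commit message.

/* Pre-patch shape of the teardown path, reconstructed from the removed
 * lines in the diff below; not a complete file.
 */
static void tcf_block_put_deferred(struct work_struct *work)
{
	struct tcf_block *block = container_of(work, struct tcf_block, work);
	struct tcf_chain *chain;

	rtnl_lock();
	/* Hold a refcnt for all chains, except 0, in case they are gone. */
	list_for_each_entry(chain, &block->chain_list, list)
		if (chain->index)
			tcf_chain_hold(chain);

	/* No race on the list, because no chain could be destroyed. */
	list_for_each_entry(chain, &block->chain_list, list)
		tcf_chain_flush(chain);	/* deferred flush: the qdisc may already be gone */

	INIT_WORK(&block->work, tcf_block_put_final);
	/* ... (comment not shown in the diff) ... */
	rcu_barrier();
	tcf_queue_work(&block->work);
	rtnl_unlock();
}

void tcf_block_put(struct tcf_block *block)
{
	if (!block)
		return;

	INIT_WORK(&block->work, tcf_block_put_deferred);
	/* Wait for existing RCU callbacks to cool down, make sure their works
	 * have been queued before this. We can not flush pending works here
	 * because we are holding the RTNL lock.
	 */
	rcu_barrier();
	tcf_queue_work(&block->work);
}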
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -280,8 +280,8 @@ static void tcf_block_put_final(struct work_struct *work)
 	struct tcf_block *block = container_of(work, struct tcf_block, work);
 	struct tcf_chain *chain, *tmp;
 
-	/* At this point, all the chains should have refcnt == 1. */
 	rtnl_lock();
+	/* Only chain 0 should be still here. */
 	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
 		tcf_chain_put(chain);
 	rtnl_unlock();
@@ -289,23 +289,17 @@ static void tcf_block_put_final(struct work_struct *work)
 }
 
 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
- * actions should be all removed after flushing. However, filters are destroyed
- * in RCU callbacks, we have to hold the chains first, otherwise we would
- * always race with RCU callbacks on this list without proper locking.
+ * actions should be all removed after flushing. However, filters are now
+ * destroyed in tc filter workqueue with RTNL lock, they can not race here.
  */
-static void tcf_block_put_deferred(struct work_struct *work)
+void tcf_block_put(struct tcf_block *block)
 {
-	struct tcf_block *block = container_of(work, struct tcf_block, work);
-	struct tcf_chain *chain;
+	struct tcf_chain *chain, *tmp;
 
-	rtnl_lock();
-	/* Hold a refcnt for all chains, except 0, in case they are gone. */
-	list_for_each_entry(chain, &block->chain_list, list)
-		if (chain->index)
-			tcf_chain_hold(chain);
+	if (!block)
+		return;
 
-	/* No race on the list, because no chain could be destroyed. */
-	list_for_each_entry(chain, &block->chain_list, list)
+	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
 		tcf_chain_flush(chain);
 
 	INIT_WORK(&block->work, tcf_block_put_final);
@@ -314,21 +308,6 @@ static void tcf_block_put_deferred(struct work_struct *work)
 	 */
 	rcu_barrier();
 	tcf_queue_work(&block->work);
-	rtnl_unlock();
-}
-
-void tcf_block_put(struct tcf_block *block)
-{
-	if (!block)
-		return;
-
-	INIT_WORK(&block->work, tcf_block_put_deferred);
-	/* Wait for existing RCU callbacks to cool down, make sure their works
-	 * have been queued before this. We can not flush pending works here
-	 * because we are holding the RTNL lock.
-	 */
-	rcu_barrier();
-	tcf_queue_work(&block->work);
 }
 EXPORT_SYMBOL(tcf_block_put);
 
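For readability, here is tcf_block_put() as it looks after this patch, assembled from the '+' and context lines in the hunks above; the comment the diff elides between INIT_WORK() and rcu_barrier() is replaced by a placeholder, and the short comment on the flush loop is added here for explanation, not part of the patch. The flush now happens synchronously under the RTNL lock the caller already holds, and only the final chain puts in tcf_block_put_final() are deferred to the tc filter workqueue, behind an rcu_barrier() so that pending RCU callbacks have queued their filter destroy works first.

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_chain *chain, *tmp;

	if (!block)
		return;

	/* Flush filters synchronously; RTNL is already held by the caller. */
	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
		tcf_chain_flush(chain);

	INIT_WORK(&block->work, tcf_block_put_final);
	/* ... (comment not shown in the diff) ... */
	rcu_barrier();
	tcf_queue_work(&block->work);
}
EXPORT_SYMBOL(tcf_block_put);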