	net: Add lockdep asserts to ____napi_schedule().
____napi_schedule() needs to be invoked with disabled interrupts due to
__raise_softirq_irqoff() (in order not to corrupt the per-CPU list).
____napi_schedule() also needs to be invoked from interrupt context so
that the raised softirq is processed once the interrupt context is left.

Add lockdep asserts for both conditions.

Since this is the second place that needs the irq/softirq check, provide
a generic lockdep_assert_softirq_will_run() which is used by both
callers.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d96657dc92
commit fbd9a2ceba

2 changed files with 11 additions and 1 deletion
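The two conditions translate into a calling convention. A minimal sketch of a
context that satisfies both asserts (the wrapper below is illustrative and not
part of the patch; BH-disabled process context is one of several valid
contexts):

	/* Illustrative caller: with bottom halves disabled, softirq_count()
	 * is non-zero, so the raised NET_RX_SOFTIRQ is guaranteed to run at
	 * the matching local_bh_enable(); disabling interrupts protects the
	 * per-CPU poll list.
	 */
	static void example_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
	{
		unsigned long flags;

		local_bh_disable();
		local_irq_save(flags);
		____napi_schedule(sd, napi);
		local_irq_restore(flags);
		local_bh_enable();	/* pending softirqs run here */
	}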
include/linux/lockdep.h

@@ -329,6 +329,12 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
 #define lockdep_assert_none_held_once()		\
 	lockdep_assert_once(!current->lockdep_depth)
+/*
+ * Ensure that softirq is handled within the callchain and not delayed and
+ * handled by chance.
+ */
+#define lockdep_assert_softirq_will_run()	\
+	lockdep_assert_once(hardirq_count() | softirq_count())
 
 #define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

@@ -414,6 +420,7 @@ extern int lockdep_is_held(const void *);
 #define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
 #define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
 #define lockdep_assert_none_held_once()	do { } while (0)
+#define lockdep_assert_softirq_will_run()	do { } while (0)
 
 #define lockdep_recursing(tsk)			(0)
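The helper builds on lockdep_assert_once(). Assuming its existing definition (a
one-shot WARN gated on debug_locks, per lockdep.h), the new assert expands
roughly to:

	/* Rough expansion; lockdep_assert_once(cond) is assumed to be
	 * WARN_ON_ONCE(debug_locks && !(cond)).
	 */
	WARN_ON_ONCE(debug_locks && !(hardirq_count() | softirq_count()));

hardirq_count() is non-zero in hard interrupt context; softirq_count() is
non-zero both in softirq context and inside local_bh_disable() sections. In
every one of those contexts a raised softirq is processed when the context is
left, which is exactly the property the assert's name describes.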
net/core/dev.c

@@ -4265,6 +4265,9 @@ static inline void ____napi_schedule(struct softnet_data *sd,
 {
 	struct task_struct *thread;
 
+	lockdep_assert_softirq_will_run();
+	lockdep_assert_irqs_disabled();
+
 	if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
 		/* Paired with smp_mb__before_atomic() in
 		 * napi_enable()/dev_set_threaded().
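Both asserts hold in the common case, a NAPI driver scheduling its poll routine
from the device's hard interrupt handler (a hypothetical driver sketch; the
mydrv_* names are illustrative):

	/* Hypothetical driver: in a hardirq handler, interrupts are disabled
	 * and hardirq_count() is non-zero, so the asserts in
	 * ____napi_schedule() (reached via napi_schedule()) are satisfied.
	 */
	static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
	{
		struct mydrv_priv *priv = dev_id;

		mydrv_mask_rx_irq(priv);	/* illustrative helper */
		napi_schedule(&priv->napi);	/* softirq runs on irq exit */
		return IRQ_HANDLED;
	}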
@@ -4872,7 +4875,7 @@ int __netif_rx(struct sk_buff *skb)
 {
 	int ret;
 
-	lockdep_assert_once(hardirq_count() | softirq_count());
+	lockdep_assert_softirq_will_run();
 
 	trace_netif_rx_entry(skb);
 	ret = netif_rx_internal(skb);
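In __netif_rx() the open-coded hardirq_count() | softirq_count() check is
replaced by the named helper; the calling convention does not change. A sketch
of a valid call site (illustrative; loopback-style transmit paths are the
typical users):

	/* Illustrative call site: disabling bottom halves around __netif_rx()
	 * guarantees the raised softirq is handled at local_bh_enable().
	 */
	local_bh_disable();
	ret = __netif_rx(skb);
	local_bh_enable();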