forked from mirrors/linux
		
	 92cf211874
			
		
	
	
		92cf211874
		
	
	
	
	
		
			
preempt_mask.h defines all the preempt_count semantics and related symbols: preempt, softirq, hardirq, nmi, preempt active, need resched, etc... preempt.h defines the accessors and mutators of preempt_count. But there is a messy dependency game around those two header files: * preempt_mask.h includes preempt.h in order to access preempt_count() * preempt_mask.h defines all preempt_count semantics and symbols except PREEMPT_NEED_RESCHED that is needed by asm/preempt.h Thus we need to define it from preempt.h, right before including asm/preempt.h, instead of defining it in preempt_mask.h with the other preempt_count symbols. Therefore the preempt_count semantics happen to be spread out. * We plan to introduce preempt_active_[enter,exit]() to consolidate preempt_schedule*() code. But we'll need to access both preempt_count mutators (preempt_count_add()) and preempt_count symbols (PREEMPT_ACTIVE, PREEMPT_OFFSET). The usual place to define preempt operations is in preempt.h but then we'll need symbols in preempt_mask.h which already includes preempt.h. So we end up with a resource circular dependency. Let's merge preempt_mask.h into preempt.h to solve these dependency issues. This way we gather semantic symbols and operation definitions of preempt_count in a single file. This is a dumb copy-paste merge. Further merge re-arrangements are performed in a subsequent patch to ease review. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/1431441711-29753-2-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
		
			
				
	
	
		
			82 lines
		
	
	
	
		
			1.7 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			82 lines
		
	
	
	
		
			1.7 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
/*
 * Hard-IRQ / NMI context helpers: macros and functions that perform the
 * entry/exit bookkeeping for hard interrupt and NMI contexts
 * (preempt_count accounting plus lockdep, ftrace, vtime and RCU hooks).
 */
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>


/* Wait for in-flight handlers of @irq to finish; implemented elsewhere. */
extern void synchronize_irq(unsigned int irq);
extern bool synchronize_hardirq(unsigned int irq);
| 
 | |
/*
 * RCU hooks called on NMI entry/exit (see nmi_enter()/nmi_exit() below).
 * Under CONFIG_TINY_RCU they are empty stubs that compile away; otherwise
 * the real implementations are provided by the RCU code elsewhere.
 */
#if defined(CONFIG_TINY_RCU)

static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif
 | |
| 
 | |
/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
/*
 * Mark entry into hard-IRQ context, without the jiffies/NO_HZ update that
 * irq_enter() additionally performs: account IRQ time against the current
 * task, raise the HARDIRQ part of preempt_count, then notify irq tracing.
 * Multi-statement macro wrapped in do/while(0) so it behaves as a single
 * statement at the call site.
 */
#define __irq_enter()					\
	do {						\
		account_irq_enter_time(current);	\
		preempt_count_add(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)
 | |
| 
 | |
/*
 * Enter irq context (on NO_HZ, update jiffies):
 * the full-featured counterpart of __irq_enter() above.
 */
extern void irq_enter(void);
 | |
| 
 | |
/*
 * Exit irq context without processing softirqs:
 * the exact inverse of __irq_enter() — irq tracing is told first, IRQ time
 * accounting is closed out, and finally the HARDIRQ part of preempt_count
 * is dropped (steps in reverse order of __irq_enter()).
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_irq_exit_time(current);		\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)
 | |
| 
 | |
/*
 * Exit irq context and process softirqs if needed:
 * the full-featured counterpart of __irq_exit() above.
 */
extern void irq_exit(void);
 | |
| 
 | |
/*
 * Enter NMI context.  The statement order is deliberate: lockdep is
 * switched off first and ftrace notified before anything else runs;
 * nested NMIs are forbidden (BUG_ON(in_nmi()) checks the NMI bits of
 * preempt_count before we add ours); then preempt_count gains both the
 * NMI and HARDIRQ offsets, and only afterwards are RCU and irq tracing
 * informed.  nmi_exit() below undoes these steps in reverse order.
 */
#define nmi_enter()						\
	do {							\
		lockdep_off();					\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)
 | |
| 
 | |
/*
 * Exit NMI context: the exact mirror of nmi_enter(), unwinding each step
 * in reverse order.  BUG_ON(!in_nmi()) verifies the NMI bits are still
 * set before they are subtracted; lockdep is re-enabled last so it stays
 * off for the entire NMI window.
 */
#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		BUG_ON(!in_nmi());				\
		preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
		lockdep_on();					\
	} while (0)
 | |
| 
 | |
| #endif /* LINUX_HARDIRQ_H */
 |