debug lockups: Improve lockup detection, fix generic arch fallback
As Andrew noted, my previous patch ("debug lockups: Improve lockup
detection") broke/removed SysRq-L support from architectures that do
not provide a __trigger_all_cpu_backtrace implementation.
Restore a fallback path and clean up the SysRq-L machinery a bit:
 - Rename the arch method to arch_trigger_all_cpu_backtrace()
 - Simplify the define
 - Document the method a bit - in the hope of more architectures
   adding support for it.
[ The patch touches Sparc code for the rename. ]
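As an aside for porters: a minimal sketch, assuming a hypothetical
arch/foo, of what opting in looks like under the new scheme (it
mirrors the Sparc and x86 hunks below):

    /* arch/foo/include/asm/irq.h -- hypothetical architecture header */

    /* Arch-provided all-CPU backtrace method: */
    void arch_trigger_all_cpu_backtrace(void);

    /*
     * The self-referential define lets the generic <linux/nmi.h>
     * detect, via #ifdef, that this architecture implements the
     * method and wrap it accordingly:
     */
    #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace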
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: "David S. Miller" <davem@davemloft.net>
LKML-Reference: <20090802140809.7ec4bb6b.akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
			
			
parent c1dc0b9c0c
commit 47cab6a722

6 changed files with 38 additions and 10 deletions
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -89,8 +89,8 @@ static inline unsigned long get_softint(void)
 	return retval;
 }
 
-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 
 extern void *hardirq_stack[NR_CPUS];
 extern void *softirq_stack[NR_CPUS];
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -251,7 +251,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
 	}
 }
 
-void __trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(void)
 {
 	struct thread_info *tp = current_thread_info();
 	struct pt_regs *regs = get_irq_regs();
@@ -304,7 +304,7 @@ void __trigger_all_cpu_backtrace(void)
 
 static void sysrq_handle_globreg(int key, struct tty_struct *tty)
 {
-	__trigger_all_cpu_backtrace();
+	arch_trigger_all_cpu_backtrace();
 }
 
 static struct sysrq_key_op sparc_globalreg_op = {
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -45,8 +45,8 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
 			void __user *, size_t *, loff_t *);
 extern int unknown_nmi_panic;
 
-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 
 static inline void localise_nmi_watchdog(void)
 {
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -554,7 +554,7 @@ int do_nmi_callback(struct pt_regs *regs, int cpu)
 	return 0;
 }
 
-void __trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(void)
 {
 	int i;
 
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -223,7 +223,20 @@ static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
 
 static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
 {
-	trigger_all_cpu_backtrace();
+	/*
+	 * Fall back to the workqueue based printing if the
+	 * backtrace printing did not succeed or the
+	 * architecture has no support for it:
+	 */
+	if (!trigger_all_cpu_backtrace()) {
+		struct pt_regs *regs = get_irq_regs();
+
+		if (regs) {
+			printk(KERN_INFO "CPU%d:\n", smp_processor_id());
+			show_regs(regs);
+		}
+		schedule_work(&sysrq_showallcpus);
+	}
 }
 
 static struct sysrq_key_op sysrq_showallcpus_op = {
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -28,8 +28,23 @@ static inline void acpi_nmi_disable(void) { }
 static inline void acpi_nmi_enable(void) { }
 #endif
 
-#ifndef trigger_all_cpu_backtrace
-#define trigger_all_cpu_backtrace() do { } while (0)
+/*
+ * Create trigger_all_cpu_backtrace() out of the arch-provided
+ * base function. Return whether such support was available,
+ * to allow calling code to fall back to some other mechanism:
+ */
+#ifdef arch_trigger_all_cpu_backtrace
+static inline bool trigger_all_cpu_backtrace(void)
+{
+	arch_trigger_all_cpu_backtrace();
+
+	return true;
+}
+#else
+static inline bool trigger_all_cpu_backtrace(void)
+{
+	return false;
+}
 #endif
 
 #endif
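
To make the detection idiom above concrete: a standalone userspace
toy (not kernel code; puts() stands in for the real backtrace
machinery) showing how the self-referential define selects the
bool-returning wrapper at compile time:

    #include <stdio.h>
    #include <stdbool.h>

    /* An "architecture" that opts in provides the function and the
     * self-referential macro, as the arch headers do in the patch: */
    void arch_trigger_all_cpu_backtrace(void) { puts("arch backtrace"); }
    #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace

    /* Generic layer, same shape as the <linux/nmi.h> hunk above: */
    #ifdef arch_trigger_all_cpu_backtrace
    static inline bool trigger_all_cpu_backtrace(void)
    {
    	arch_trigger_all_cpu_backtrace();
    	return true;
    }
    #else
    static inline bool trigger_all_cpu_backtrace(void)
    {
    	return false;
    }
    #endif

    int main(void)
    {
    	/* A caller checks the return value to decide on a fallback: */
    	if (!trigger_all_cpu_backtrace())
    		puts("no arch support; caller falls back here");
    	return 0;
    }

Deleting the #define (and the function) flips the build to the
false-returning stub, which is what drives the sysrq fallback path.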