debug lockups: Improve lockup detection, fix generic arch fallback
author Ingo Molnar <mingo@elte.hu>
Mon, 3 Aug 2009 07:31:54 +0000 (09:31 +0200)
committer Ingo Molnar <mingo@elte.hu>
Mon, 3 Aug 2009 07:56:52 +0000 (09:56 +0200)
As Andrew noted, my previous patch ("debug lockups: Improve lockup
detection") broke/removed SysRq-L support from architectures that do
not provide a __trigger_all_cpu_backtrace implementation.

Restore a fallback path and clean up the SysRq-L machinery a bit:

 - Rename the arch method to arch_trigger_all_cpu_backtrace()

 - Simplify the define

 - Document the method a bit - in the hope of more architectures
   adding support for it.

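To make the opt-in pattern concrete for architecture maintainers, here is
a minimal sketch distilled from the Sparc and x86 hunks below ("foo" stands
in for a hypothetical architecture; illustrative only, not part of the patch):

    /* arch/foo/include/asm/nmi.h - hypothetical architecture "foo" */
    void arch_trigger_all_cpu_backtrace(void);
    #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace

Defining the macro to its own name makes the symbol visible to the
preprocessor, so the generic include/linux/nmi.h can probe for it with a
plain #ifdef and provide a trigger_all_cpu_backtrace() that reports whether
arch support exists.
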
[ The patch touches Sparc code for the rename. ]

Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: "David S. Miller" <davem@davemloft.net>
LKML-Reference: <20090802140809.7ec4bb6b.akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/sparc/include/asm/irq_64.h
arch/sparc/kernel/process_64.c
arch/x86/include/asm/nmi.h
arch/x86/kernel/apic/nmi.c
drivers/char/sysrq.c
include/linux/nmi.h

diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 1934f2c..a0b443c 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -89,8 +89,8 @@ static inline unsigned long get_softint(void)
        return retval;
 }
 
-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 
 extern void *hardirq_stack[NR_CPUS];
 extern void *softirq_stack[NR_CPUS];
 
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 4041f94..18d6785 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -251,7 +251,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
        }
 }
 
-void __trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(void)
 {
        struct thread_info *tp = current_thread_info();
        struct pt_regs *regs = get_irq_regs();
@@ -304,7 +304,7 @@ void __trigger_all_cpu_backtrace(void)
 
 static void sysrq_handle_globreg(int key, struct tty_struct *tty)
 {
-       __trigger_all_cpu_backtrace();
+       arch_trigger_all_cpu_backtrace();
 }
 
 static struct sysrq_key_op sparc_globalreg_op = {
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index c86e5ed..e63cf7d 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -45,8 +45,8 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
                        void __user *, size_t *, loff_t *);
 extern int unknown_nmi_panic;
 
-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 
 static inline void localise_nmi_watchdog(void)
 {
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 1bb1ac2..db72202 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -554,7 +554,7 @@ int do_nmi_callback(struct pt_regs *regs, int cpu)
        return 0;
 }
 
-void __trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(void)
 {
        int i;
 
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 165f307..50eecfe 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -223,7 +223,20 @@ static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
 
 static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
 {
-       trigger_all_cpu_backtrace();
+       /*
+        * Fall back to the workqueue based printing if the
+        * backtrace printing did not succeed or the
+        * architecture has no support for it:
+        */
+       if (!trigger_all_cpu_backtrace()) {
+               struct pt_regs *regs = get_irq_regs();
+
+               if (regs) {
+                       printk(KERN_INFO "CPU%d:\n", smp_processor_id());
+                       show_regs(regs);
+               }
+               schedule_work(&sysrq_showallcpus);
+       }
 }
 
 static struct sysrq_key_op sysrq_showallcpus_op = {
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 29af2d5..b752e80 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -28,8 +28,23 @@ static inline void acpi_nmi_disable(void) { }
 static inline void acpi_nmi_enable(void) { }
 #endif
 
-#ifndef trigger_all_cpu_backtrace
-#define trigger_all_cpu_backtrace() do { } while (0)
+/*
+ * Create trigger_all_cpu_backtrace() out of the arch-provided
+ * base function. Return whether such support was available,
+ * to allow calling code to fall back to some other mechanism:
+ */
+#ifdef arch_trigger_all_cpu_backtrace
+static inline bool trigger_all_cpu_backtrace(void)
+{
+       arch_trigger_all_cpu_backtrace();
+
+       return true;
+}
+#else
+static inline bool trigger_all_cpu_backtrace(void)
+{
+       return false;
+}
 #endif
 
 #endif
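
For reference, a caller-side sketch of the resulting API (illustrative only;
the hypothetical helper below is not part of the patch - the sysrq.c hunk
above is the real in-tree user):

    #include <linux/kernel.h>
    #include <linux/nmi.h>

    static void show_cpu_backtraces(void)
    {
            /*
             * trigger_all_cpu_backtrace() returns false when the
             * architecture defines no arch_trigger_all_cpu_backtrace(),
             * so callers can degrade to a softer mechanism:
             */
            if (!trigger_all_cpu_backtrace())
                    printk(KERN_INFO "no NMI backtrace support, falling back\n");
    }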