cpumask: Avoid cpumask_t in arch/x86/kernel/apic/nmi.c
authorRusty Russell <rusty@rustcorp.com.au>
Tue, 3 Nov 2009 04:23:52 +0000 (14:53 +1030)
committerIngo Molnar <mingo@elte.hu>
Wed, 4 Nov 2009 12:17:53 +0000 (13:17 +0100)
Ingo wants the certainty of a static cpumask (rather than a
cpumask_var_t), but cpumask_t will someday be undefined to
prevent on-stack declarations.

This is what DECLARE_BITMAP/to_cpumask() is for.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
LKML-Reference: <200911031453.52394.rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/apic/nmi.c

index 7ff61d6..6389432 100644 (file)
@@ -39,7 +39,8 @@
 int unknown_nmi_panic;
 int nmi_watchdog_enabled;
 
-static cpumask_t backtrace_mask __read_mostly;
+/* For reliability, we're prepared to waste bits here. */
+static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
 
 /* nmi_active:
  * >0: the lapic NMI watchdog is active, but can be disabled
@@ -414,7 +415,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
        }
 
        /* We can be called before check_nmi_watchdog, hence NULL check. */
-       if (cpumask_test_cpu(cpu, &backtrace_mask)) {
+       if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
                static DEFINE_SPINLOCK(lock);   /* Serialise the printks */
 
                spin_lock(&lock);
@@ -422,7 +423,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
                show_regs(regs);
                dump_stack();
                spin_unlock(&lock);
-               cpumask_clear_cpu(cpu, &backtrace_mask);
+               cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 
                rc = 1;
        }
@@ -558,14 +559,14 @@ void arch_trigger_all_cpu_backtrace(void)
 {
        int i;
 
-       cpumask_copy(&backtrace_mask, cpu_online_mask);
+       cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
 
        printk(KERN_INFO "sending NMI to all CPUs:\n");
        apic->send_IPI_all(NMI_VECTOR);
 
        /* Wait for up to 10 seconds for all CPUs to do the backtrace */
        for (i = 0; i < 10 * 1000; i++) {
-               if (cpumask_empty(&backtrace_mask))
+               if (cpumask_empty(to_cpumask(backtrace_mask)))
                        break;
                mdelay(1);
        }