Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author	Linus Torvalds <torvalds@linux-foundation.org>
Thu, 28 Oct 2010 01:38:55 +0000 (18:38 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Thu, 28 Oct 2010 01:38:55 +0000 (18:38 -0700)
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  percpu: Remove the multi-page alignment facility
  x86-32: Allocate irq stacks separate from percpu area
  x86-32, mm: Remove duplicated #include
  x86, printk: Get rid of <0> from stack output
  x86, kexec: Make sure to stop all CPUs before exiting the kernel
  x86/vsmp: Eliminate kconfig dependency warning

12 files changed:
arch/x86/Kconfig
arch/x86/include/asm/irq.h
arch/x86/include/asm/smp.h
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/irq_32.c
arch/x86/kernel/reboot.c
arch/x86/kernel/smp.c
arch/x86/kernel/smpboot.c
arch/x86/xen/enlighten.c
arch/x86/xen/smp.c
include/linux/percpu-defs.h

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index dfabfef..299fbc8 100644
@@ -347,6 +347,7 @@ endif
 
 config X86_VSMP
        bool "ScaleMP vSMP"
+       select PARAVIRT_GUEST
        select PARAVIRT
        depends on X86_64 && PCI
        depends on X86_EXTENDED_PLATFORM
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 0bf5b00..13b0eba 100644
@@ -21,10 +21,8 @@ static inline int irq_canonicalize(int irq)
 
 #ifdef CONFIG_X86_32
 extern void irq_ctx_init(int cpu);
-extern void irq_ctx_exit(int cpu);
 #else
 # define irq_ctx_init(cpu) do { } while (0)
-# define irq_ctx_exit(cpu) do { } while (0)
 #endif
 
 #define __ARCH_HAS_DO_SOFTIRQ
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 4cfc908..4c2f63c 100644
@@ -50,7 +50,7 @@ struct smp_ops {
        void (*smp_prepare_cpus)(unsigned max_cpus);
        void (*smp_cpus_done)(unsigned max_cpus);
 
-       void (*smp_send_stop)(void);
+       void (*stop_other_cpus)(int wait);
        void (*smp_send_reschedule)(int cpu);
 
        int (*cpu_up)(unsigned cpu);
@@ -73,7 +73,12 @@ extern struct smp_ops smp_ops;
 
 static inline void smp_send_stop(void)
 {
-       smp_ops.smp_send_stop();
+       smp_ops.stop_other_cpus(0);
+}
+
+static inline void stop_other_cpus(void)
+{
+       smp_ops.stop_other_cpus(1);
 }
 
 static inline void smp_prepare_boot_cpu(void)
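
The hook itself gains a wait argument rather than being renamed at every call
site: smp_send_stop() keeps the old best-effort behaviour (wait = 0), while the
new stop_other_cpus() passes wait = 1 so that the reboot/kexec paths
(native_machine_shutdown() and xen_reboot() below) do not continue until the
other CPUs really have stopped. A standalone sketch of the calling convention,
with hypothetical names (example_smp_ops, example_backend):

	/*
	 * Sketch only -- not kernel code.  It models the split between the
	 * single stop_other_cpus(wait) hook and the two inline wrappers
	 * that choose its semantics.
	 */
	#include <stdio.h>

	struct example_smp_ops {
		void (*stop_other_cpus)(int wait);
	};

	static void example_backend(int wait)
	{
		printf("stopping other CPUs: %s\n",
		       wait ? "blocking until they are gone"
			    : "best effort, roughly one second");
	}

	static struct example_smp_ops ops = {
		.stop_other_cpus = example_backend,
	};

	static void smp_send_stop(void)   { ops.stop_other_cpus(0); }
	static void stop_other_cpus(void) { ops.stop_other_cpus(1); }

	int main(void)
	{
		smp_send_stop();	/* legacy, non-blocking semantics */
		stop_other_cpus();	/* shutdown/kexec path */
		return 0;
	}
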
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 0f6376f..1bc7f75 100644
@@ -82,11 +82,11 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                if (kstack_end(stack))
                        break;
                if (i && ((i % STACKSLOTS_PER_LINE) == 0))
-                       printk("\n%s", log_lvl);
-               printk(" %08lx", *stack++);
+                       printk(KERN_CONT "\n");
+               printk(KERN_CONT " %08lx", *stack++);
                touch_nmi_watchdog();
        }
-       printk("\n");
+       printk(KERN_CONT "\n");
        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
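
In this path log_lvl can be KERN_EMERG, i.e. the literal string "<0>", and the
old printk("\n%s", log_lvl) emitted it verbatim into the hex dump -- that is
the "<0>" the commit title refers to.  Continuation pieces are now tagged
KERN_CONT so they are appended to the current line without any level string
being printed.  A short sketch of the idiom (variable names are illustrative,
not from the patch):

	/* Open the line once with a real level, then append with KERN_CONT. */
	printk(KERN_EMERG "Stack:");
	for (i = 0; i < words; i++)
		printk(KERN_CONT " %08lx", stack[i]);
	printk(KERN_CONT "\n");
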
 
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 57a21f1..6a34048 100644
@@ -265,20 +265,20 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                if (stack >= irq_stack && stack <= irq_stack_end) {
                        if (stack == irq_stack_end) {
                                stack = (unsigned long *) (irq_stack_end[-1]);
-                               printk(" <EOI> ");
+                               printk(KERN_CONT " <EOI> ");
                        }
                } else {
                if (((long) stack & (THREAD_SIZE-1)) == 0)
                        break;
                }
                if (i && ((i % STACKSLOTS_PER_LINE) == 0))
-                       printk("\n%s", log_lvl);
-               printk(" %016lx", *stack++);
+                       printk(KERN_CONT "\n");
+               printk(KERN_CONT " %016lx", *stack++);
                touch_nmi_watchdog();
        }
        preempt_enable();
 
-       printk("\n");
+       printk(KERN_CONT "\n");
        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
 
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 50fbbe6..64668db 100644
@@ -60,9 +60,6 @@ union irq_ctx {
 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
 
-static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, hardirq_stack, THREAD_SIZE);
-static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, softirq_stack, THREAD_SIZE);
-
 static void call_on_stack(void *func, void *stack)
 {
        asm volatile("xchgl     %%ebx,%%esp     \n"
@@ -128,7 +125,7 @@ void __cpuinit irq_ctx_init(int cpu)
        if (per_cpu(hardirq_ctx, cpu))
                return;
 
-       irqctx = &per_cpu(hardirq_stack, cpu);
+       irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
@@ -137,7 +134,7 @@ void __cpuinit irq_ctx_init(int cpu)
 
        per_cpu(hardirq_ctx, cpu) = irqctx;
 
-       irqctx = &per_cpu(softirq_stack, cpu);
+       irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
@@ -150,11 +147,6 @@ void __cpuinit irq_ctx_init(int cpu)
               cpu, per_cpu(hardirq_ctx, cpu),  per_cpu(softirq_ctx, cpu));
 }
 
-void irq_ctx_exit(int cpu)
-{
-       per_cpu(hardirq_ctx, cpu) = NULL;
-}
-
 asmlinkage void do_softirq(void)
 {
        unsigned long flags;
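
Previously the two irq stacks were static per-CPU arrays declared with
DEFINE_PER_CPU_MULTIPAGE_ALIGNED, which forced THREAD_SIZE alignment onto the
per-CPU area (the facility being removed from percpu-defs.h at the end of this
diff).  They are now allocated at CPU bring-up from the page allocator instead.
A rough sketch of why that is enough, assuming the usual x86-32 relationship
THREAD_SIZE == PAGE_SIZE << THREAD_ORDER and treating THREAD_FLAGS as the
arch's GFP mask for stack allocations:

	/*
	 * Sketch, not part of the patch: the buddy allocator returns
	 * 2^order contiguous pages aligned to the size of the block, so an
	 * order-THREAD_ORDER allocation is THREAD_SIZE-aligned by
	 * construction -- which is what code that recovers the stack base
	 * by masking with ~(THREAD_SIZE - 1) relies on.
	 */
	union irq_ctx *irqctx;

	irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
	BUG_ON((unsigned long)irqctx & (THREAD_SIZE - 1));
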
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index f7f53dc..c495aa8 100644
@@ -635,7 +635,7 @@ void native_machine_shutdown(void)
        /* O.K Now that I'm on the appropriate processor,
         * stop all of the others.
         */
-       smp_send_stop();
+       stop_other_cpus();
 #endif
 
        lapic_shutdown();
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index d801210..513deac 100644
@@ -159,10 +159,10 @@ asmlinkage void smp_reboot_interrupt(void)
        irq_exit();
 }
 
-static void native_smp_send_stop(void)
+static void native_stop_other_cpus(int wait)
 {
        unsigned long flags;
-       unsigned long wait;
+       unsigned long timeout;
 
        if (reboot_force)
                return;
@@ -179,9 +179,12 @@ static void native_smp_send_stop(void)
        if (num_online_cpus() > 1) {
                apic->send_IPI_allbutself(REBOOT_VECTOR);
 
-               /* Don't wait longer than a second */
-               wait = USEC_PER_SEC;
-               while (num_online_cpus() > 1 && wait--)
+               /*
+                * Don't wait longer than a second if the caller
+                * didn't ask us to wait.
+                */
+               timeout = USEC_PER_SEC;
+               while (num_online_cpus() > 1 && (wait || timeout--))
                        udelay(1);
        }
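
The old code reused the variable named wait as the microsecond countdown; now
that wait is a real parameter, a separate timeout takes that role, and the
condition (wait || timeout--) spins indefinitely when the caller asked to wait
and for roughly a second otherwise.  A standalone sketch of that condition,
with made-up names and a tiny timeout so it terminates quickly:

	/* Sketch only -- not kernel code. */
	#include <stdio.h>

	static unsigned other_cpus = 3;	/* pretend the IPIs are never answered */

	static void spin_for_others(int wait)
	{
		unsigned long timeout = 5;	/* stand-in for USEC_PER_SEC */
		unsigned long spins = 0;

		while (other_cpus > 0 && (wait || timeout--))
			spins++;		/* the real loop does udelay(1) */

		printf("wait=%d gave up after %lu spins\n", wait, spins);
	}

	int main(void)
	{
		spin_for_others(0);	/* bounded: stops once timeout runs out */
		/*
		 * spin_for_others(1) would never return here, just as the
		 * kernel variant keeps waiting until the CPUs actually stop.
		 */
		return 0;
	}
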
 
@@ -227,7 +230,7 @@ struct smp_ops smp_ops = {
        .smp_prepare_cpus       = native_smp_prepare_cpus,
        .smp_cpus_done          = native_smp_cpus_done,
 
-       .smp_send_stop          = native_smp_send_stop,
+       .stop_other_cpus        = native_stop_other_cpus,
        .smp_send_reschedule    = native_smp_send_reschedule,
 
        .cpu_up                 = native_cpu_up,
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6c7faec..083e99d 100644
@@ -1373,7 +1373,6 @@ void play_dead_common(void)
 {
        idle_task_exit();
        reset_lazy_tlbstate();
-       irq_ctx_exit(raw_smp_processor_id());
        c1e_remove_cpu(raw_smp_processor_id());
 
        mb();
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 44ab12d..580da1a 100644
@@ -1016,7 +1016,7 @@ static void xen_reboot(int reason)
        struct sched_shutdown r = { .reason = reason };
 
 #ifdef CONFIG_SMP
-       smp_send_stop();
+       stop_other_cpus();
 #endif
 
        if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 25f232b..f4d0100 100644
@@ -400,9 +400,9 @@ static void stop_self(void *v)
        BUG();
 }
 
-static void xen_smp_send_stop(void)
+static void xen_stop_other_cpus(int wait)
 {
-       smp_call_function(stop_self, NULL, 0);
+       smp_call_function(stop_self, NULL, wait);
 }
 
 static void xen_smp_send_reschedule(int cpu)
@@ -470,7 +470,7 @@ static const struct smp_ops xen_smp_ops __initdata = {
        .cpu_disable = xen_cpu_disable,
        .play_dead = xen_play_dead,
 
-       .smp_send_stop = xen_smp_send_stop,
+       .stop_other_cpus = xen_stop_other_cpus,
        .smp_send_reschedule = xen_smp_send_reschedule,
 
        .send_call_func_ipi = xen_smp_send_call_function_ipi,
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 018db9a..27ef6b1 100644
 #define DEFINE_PER_CPU_READ_MOSTLY(type, name)                         \
        DEFINE_PER_CPU_SECTION(type, name, "..readmostly")
 
-/*
- * Declaration/definition used for large per-CPU variables that must be
- * aligned to something larger than the pagesize.
- */
-#define DECLARE_PER_CPU_MULTIPAGE_ALIGNED(type, name, size)            \
-       DECLARE_PER_CPU_SECTION(type, name, "..page_aligned")           \
-       __aligned(size)
-
-#define DEFINE_PER_CPU_MULTIPAGE_ALIGNED(type, name, size)             \
-       DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")            \
-       __aligned(size)
-
 /*
  * Intermodule exports for per-CPU variables.  sparse forgets about
  * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to