Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux...
[pandora-kernel.git] / arch / x86 / xen / smp.c
index d53bf9d..94e6900 100644 (file)
 #include "xen-ops.h"
 #include "mmu.h"
 
-static cpumask_t cpu_initialized_map;
-static DEFINE_PER_CPU(int, resched_irq);
-static DEFINE_PER_CPU(int, callfunc_irq);
+static cpumask_t xen_cpu_initialized_map;
+static DEFINE_PER_CPU(int, resched_irq) = -1;
+static DEFINE_PER_CPU(int, callfunc_irq) = -1;
+static DEFINE_PER_CPU(int, debug_irq) = -1;
 
 /*
  * Structure and data for smp_call_function(). This is designed to minimise
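The hunk above moves the "-1 means unbound" initialization of the per-CPU IRQ
slots from xen_smp_intr_init() into the definitions themselves, and adds a
slot for the new per-CPU debug IRQ. With static initializers, the failure
path can always distinguish a bound IRQ (>= 0) from one that was never bound,
even if setup bailed out before reaching that CPU. A minimal sketch of the
pattern, with hypothetical names:

    static DEFINE_PER_CPU(int, example_irq) = -1;   /* -1 == never bound */

    static void example_teardown(unsigned int cpu)
    {
            /* Safe even if setup never ran, or bailed out early. */
            if (per_cpu(example_irq, cpu) >= 0) {
                    unbind_from_irqhandler(per_cpu(example_irq, cpu), NULL);
                    per_cpu(example_irq, cpu) = -1;
            }
    }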
@@ -72,6 +73,7 @@ static __cpuinit void cpu_bringup_and_idle(void)
        int cpu = smp_processor_id();
 
        cpu_init();
+       xen_enable_sysenter();
 
        preempt_disable();
        per_cpu(cpu_state, cpu) = CPU_ONLINE;
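xen_enable_sysenter() is called from cpu_bringup_and_idle(), which every
secondary VCPU executes as it comes online, because the SYSENTER entry point
must be registered with the hypervisor once per VCPU rather than once at
boot. A hedged sketch of such a registration via the generic callback
interface of that era (struct layout and fallback handling illustrative):

    #include <xen/interface/callback.h>

    static void register_sysenter_callback(void)
    {
            extern void xen_sysenter_target(void);  /* assumed entry stub */
            struct callback_register sysenter = {
                    .type    = CALLBACKTYPE_sysenter,
                    .address = { __KERNEL_CS, (unsigned long)xen_sysenter_target },
            };

            /* If the hypervisor is too old to support the callback, hide
             * SEP so userspace falls back to int $0x80. */
            if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) != 0)
                    clear_cpu_cap(&boot_cpu_data, X86_FEATURE_SEP);
    }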
@@ -88,9 +90,7 @@ static __cpuinit void cpu_bringup_and_idle(void)
 static int xen_smp_intr_init(unsigned int cpu)
 {
        int rc;
-       const char *resched_name, *callfunc_name;
-
-       per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = -1;
+       const char *resched_name, *callfunc_name, *debug_name;
 
        resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
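The resched binding whose start is shown above follows the same shape as the
callfunc and debug bindings in this patch: the helper returns the Linux IRQ
number on success or a negative errno, which is stashed in the per-CPU slot.
Reconstructed from the other two call sites (flags as used throughout the
file):

    resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
    rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
                                cpu,
                                xen_reschedule_interrupt,
                                IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
                                resched_name,
                                NULL);
    if (rc < 0)
            goto fail;
    per_cpu(resched_irq, cpu) = rc;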
@@ -114,6 +114,14 @@ static int xen_smp_intr_init(unsigned int cpu)
                goto fail;
        per_cpu(callfunc_irq, cpu) = rc;
 
+       debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
+       rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
+                                    IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
+                                    debug_name, NULL);
+       if (rc < 0)
+               goto fail;
+       per_cpu(debug_irq, cpu) = rc;
+
        return 0;
 
  fail:
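The new hunk binds VIRQ_DEBUG on each CPU, so the hypervisor's per-VCPU debug
virtual interrupt is serviced locally; IRQF_PERCPU and IRQF_NOBALANCING keep
the binding pinned to its CPU. A hypothetical minimal shape for such a
handler (the real xen_debug_interrupt lives in the Xen event-channel code and
dumps event-channel state):

    static irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
    {
            /* Illustrative only: report which CPU took the debug VIRQ. */
            printk(KERN_DEBUG "xen: debug interrupt on CPU %d\n",
                   smp_processor_id());
            return IRQ_HANDLED;
    }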
@@ -121,6 +129,8 @@ static int xen_smp_intr_init(unsigned int cpu)
                unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
        if (per_cpu(callfunc_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
+       if (per_cpu(debug_irq, cpu) >= 0)
+               unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
        return rc;
 }
 
@@ -146,7 +156,7 @@ void __init xen_smp_prepare_boot_cpu(void)
           old memory can be recycled */
        make_lowmem_page_readwrite(&per_cpu__gdt_page);
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+       for_each_possible_cpu(cpu) {
                cpus_clear(per_cpu(cpu_sibling_map, cpu));
                /*
                 * cpu_core_map lives in a per cpu area that is cleared
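This hunk and the next replace open-coded 0..NR_CPUS-1 walks with
for_each_possible_cpu(), which visits only CPUs present in cpu_possible_map.
That matters for per-CPU data: slots for impossible CPUs may never be set up,
and the accessor also stays correct when the possible map is sparse. Roughly:

    /* Old: walks every compile-time slot, possible or not. */
    for (cpu = 0; cpu < NR_CPUS; cpu++)
            cpus_clear(per_cpu(cpu_sibling_map, cpu));

    /* New: walks only CPUs that can ever exist on this boot. */
    for_each_possible_cpu(cpu)
            cpus_clear(per_cpu(cpu_sibling_map, cpu));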
@@ -163,7 +173,7 @@ void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 {
        unsigned cpu;
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+       for_each_possible_cpu(cpu) {
                cpus_clear(per_cpu(cpu_sibling_map, cpu));
                /*
                 * cpu_core_map will be zeroed when the per
@@ -179,11 +189,11 @@ void __init xen_smp_prepare_cpus(unsigned int max_cpus)
        if (xen_smp_intr_init(0))
                BUG();
 
-       cpu_initialized_map = cpumask_of_cpu(0);
+       xen_cpu_initialized_map = cpumask_of_cpu(0);
 
        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
-               for (cpu = NR_CPUS-1; !cpu_isset(cpu, cpu_possible_map); cpu--)
+               for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
                        continue;
                cpu_clear(cpu, cpu_possible_map);
        }
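The restriction loop peels CPUs off the top of cpu_possible_map one at a
time until no more than max_cpus remain: scan down from NR_CPUS - 1 to the
highest still-possible CPU, clear it, and repeat. cpu_possible() is simply
the accessor form of the open-coded cpumask test it replaces; in the cpumask
API of that era it was roughly:

    #define cpu_possible(cpu)       cpu_isset((cpu), cpu_possible_map)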
@@ -210,7 +220,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
        struct vcpu_guest_context *ctxt;
        struct gdt_page *gdt = &per_cpu(gdt_page, cpu);
 
-       if (cpu_test_and_set(cpu, cpu_initialized_map))
+       if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
                return 0;
 
        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
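The cpu_test_and_set() guard (now on the Xen-private map) makes
cpu_initialize_context() idempotent: the first caller for a given CPU
atomically claims the bit and goes on to build the vcpu_guest_context; any
repeat call returns 0 without touching the context again. The pattern, per
the lines above:

    /* test_and_set is atomic, so exactly one caller does the work. */
    if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
            return 0;       /* this VCPU's context was already built */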
@@ -239,10 +249,10 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
        ctxt->gdt_ents      = ARRAY_SIZE(gdt->gdt);
 
        ctxt->user_regs.cs = __KERNEL_CS;
-       ctxt->user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs);
+       ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
 
        ctxt->kernel_ss = __KERNEL_DS;
-       ctxt->kernel_sp = idle->thread.esp0;
+       ctxt->kernel_sp = idle->thread.sp0;
 
        ctxt->event_callback_cs     = __KERNEL_CS;
        ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
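The esp0 -> sp0 changes simply track the i386/x86-64 unification of struct
thread_struct, in which the kernel-stack-top field was renamed so one name
serves both widths. Illustratively:

    struct thread_struct {
            /* ... */
            unsigned long sp0;      /* top of kernel stack; was esp0 on 32-bit */
            /* ... */
    };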
@@ -356,6 +366,7 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
         */
        irq_enter();
        (*func)(info);
+       __get_cpu_var(irq_stat).irq_call_count++;
        irq_exit();
 
        if (wait) {
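The added irq_call_count increment makes Xen's call-function IPIs visible in
the per-CPU interrupt statistics, just like native SMP call interrupts; note
that it is bumped inside the irq_enter()/irq_exit() window of the handler.
Roughly how the counter surfaces in /proc/interrupts on x86 of that era
(reader side illustrative):

    seq_printf(p, "CAL: ");
    for_each_online_cpu(j)
            seq_printf(p, "%10u ", per_cpu(irq_stat, j).irq_call_count);
    seq_printf(p, "  function call interrupts\n");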