xen/events: mask events when changing their VCPU binding
[pandora-kernel.git] drivers/xen/events.c
index 6e075cd..f6227cc 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -316,7 +316,7 @@ static void init_evtchn_cpu_bindings(void)
 
        for_each_possible_cpu(i)
                memset(per_cpu(cpu_evtchn_mask, i),
-                      (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
+                      (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
 }
 
 static inline void clear_evtchn(int port)
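
A note on the hunk above: cpu_evtchn_mask is (in this file) a per-CPU array of unsigned long holding one bit per event channel, so sizeof(*per_cpu(cpu_evtchn_mask, i)) evaluates to the size of a single word and the old memset only touched the first BITS_PER_LONG ports of each mask; NR_EVENT_CHANNELS/8 covers the whole bitmap. A minimal userspace sketch of the sizeof pitfall (the flattened declaration and the NR_EVENT_CHANNELS value are assumptions for illustration):

    #include <stdio.h>

    #define NR_EVENT_CHANNELS 4096                 /* assumed 64-bit value */
    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /* Stand-in for the per-CPU bitmap in events.c. */
    static unsigned long cpu_evtchn_mask[NR_EVENT_CHANNELS / BITS_PER_LONG];

    int main(void)
    {
        /* Old memset length: a single word, e.g. 8 bytes on 64-bit. */
        printf("sizeof(*cpu_evtchn_mask) = %zu\n", sizeof(*cpu_evtchn_mask));
        /* New memset length: the whole bitmap, in bytes. */
        printf("NR_EVENT_CHANNELS/8      = %d\n", NR_EVENT_CHANNELS / 8);
        return 0;
    }
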
@@ -600,7 +600,7 @@ static void disable_pirq(struct irq_data *data)
        disable_dynirq(data);
 }
 
-static int find_irq_by_gsi(unsigned gsi)
+int xen_irq_from_gsi(unsigned gsi)
 {
        struct irq_info *info;
 
@@ -614,6 +614,7 @@ static int find_irq_by_gsi(unsigned gsi)
 
        return -1;
 }
+EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
 
 /*
  * Do not make any assumptions regarding the relationship between the
@@ -633,7 +634,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
 
        mutex_lock(&irq_mapping_update_lock);
 
-       irq = find_irq_by_gsi(gsi);
+       irq = xen_irq_from_gsi(gsi);
        if (irq != -1) {
                printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
                       irq, gsi);
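
With find_irq_by_gsi() renamed to xen_irq_from_gsi() and exported, code outside this file can translate a GSI into the Xen-assigned IRQ. A hedged usage sketch (the helper is hypothetical, and the prototype is assumed to be exposed via xen/events.h):

    #include <linux/errno.h>
    #include <xen/events.h>     /* assumed home of the xen_irq_from_gsi() prototype */

    /* Hypothetical helper: return the IRQ already bound to a GSI, if any. */
    static int example_irq_for_gsi(unsigned gsi)
    {
        int irq = xen_irq_from_gsi(gsi);

        if (irq < 0)            /* xen_irq_from_gsi() returns -1 if no mapping exists */
            return -ENOENT;

        return irq;
    }
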
@@ -1175,7 +1176,7 @@ static void __xen_evtchn_do_upcall(void)
 {
        int start_word_idx, start_bit_idx;
        int word_idx, bit_idx;
-       int i;
+       int i, irq;
        int cpu = get_cpu();
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
@@ -1183,6 +1184,8 @@ static void __xen_evtchn_do_upcall(void)
 
        do {
                unsigned long pending_words;
+               unsigned long pending_bits;
+               struct irq_desc *desc;
 
                vcpu_info->evtchn_upcall_pending = 0;
 
@@ -1193,6 +1196,17 @@ static void __xen_evtchn_do_upcall(void)
                /* Clear master flag /before/ clearing selector flag. */
                wmb();
 #endif
+               if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) {
+                       int evtchn = evtchn_from_irq(irq);
+                       word_idx = evtchn / BITS_PER_LONG;
+                       pending_bits = evtchn % BITS_PER_LONG;
+                       if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) {
+                               desc = irq_to_desc(irq);
+                               if (desc)
+                                       generic_handle_irq_desc(irq, desc);
+                       }
+               }
+
                pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
 
                start_word_idx = __this_cpu_read(current_word_idx);
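
The block added above peeks at the timer VIRQ's port directly in the shared pending bitmap and handles it before the round-robin walk over the remaining ports begins; note that pending_bits is reused here as the bit index within the selected word. The word/bit split is plain modular arithmetic, e.g. for an arbitrary example port:

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    int main(void)
    {
        int evtchn = 67;        /* example port number */

        /* On a 64-bit build this prints "word 1, bit 3". */
        printf("word %zu, bit %zu\n",
               evtchn / BITS_PER_LONG, evtchn % BITS_PER_LONG);
        return 0;
    }
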
@@ -1201,7 +1215,6 @@ static void __xen_evtchn_do_upcall(void)
                word_idx = start_word_idx;
 
                for (i = 0; pending_words != 0; i++) {
-                       unsigned long pending_bits;
                        unsigned long words;
 
                        words = MASK_LSBS(pending_words, word_idx);
@@ -1230,8 +1243,7 @@ static void __xen_evtchn_do_upcall(void)
 
                        do {
                                unsigned long bits;
-                               int port, irq;
-                               struct irq_desc *desc;
+                               int port;
 
                                bits = MASK_LSBS(pending_bits, bit_idx);
 
@@ -1328,8 +1340,10 @@ void rebind_evtchn_irq(int evtchn, int irq)
 /* Rebind an evtchn so that it gets delivered to a specific cpu */
 static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 {
+       struct shared_info *s = HYPERVISOR_shared_info;
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn = evtchn_from_irq(irq);
+       int masked;
 
        if (!VALID_EVTCHN(evtchn))
                return -1;
@@ -1345,6 +1359,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;
 
+       /*
+        * Mask the event while changing the VCPU binding to prevent
+        * it being delivered on an unexpected VCPU.
+        */
+       masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
+
        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
@@ -1353,6 +1373,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);
 
+       if (!masked)
+               unmask_evtchn(evtchn);
+
        return 0;
 }
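
The fix named in the subject line is the save-and-conditionally-restore pattern around the EVTCHNOP_bind_vcpu call: the port is masked up front and unmasked afterwards only if it was not already masked, so an event raised mid-rebind cannot be delivered on an unexpected VCPU. The same pattern in isolation, with plain C stand-ins for the shared-info bitmap and the kernel's sync_* bitops (a sketch, not the kernel code):

    #include <stdbool.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    static unsigned long evtchn_mask[64];          /* stand-in for s->evtchn_mask */

    static bool test_and_set_mask(int port)
    {
        unsigned long bit = 1UL << (port % BITS_PER_LONG);
        bool old = evtchn_mask[port / BITS_PER_LONG] & bit;

        evtchn_mask[port / BITS_PER_LONG] |= bit;
        return old;
    }

    static void clear_mask(int port)
    {
        evtchn_mask[port / BITS_PER_LONG] &= ~(1UL << (port % BITS_PER_LONG));
    }

    static void rebind_port(int port)
    {
        bool masked = test_and_set_mask(port);     /* mask; remember prior state */

        /* ... change the VCPU binding while the event cannot be delivered ... */

        if (!masked)                               /* unmask only if we masked it */
            clear_mask(port);
    }

    int main(void)
    {
        rebind_port(42);                           /* arbitrary example port */
        return 0;
    }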