Merge branch 'agp-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied...
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 5601506..36a5141 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -187,7 +187,6 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl
        struct xen_spinlock *prev;
        int irq = __get_cpu_var(lock_kicker_irq);
        int ret;
-       unsigned long flags;
        u64 start;
 
        /* If kicker interrupts not initialized yet, just spin */
@@ -199,16 +198,12 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl
        /* announce we're spinning */
        prev = spinning_lock(xl);
 
-       flags = __raw_local_save_flags();
-       if (irq_enable) {
-               ADD_STATS(taken_slow_irqenable, 1);
-               raw_local_irq_enable();
-       }
-
        ADD_STATS(taken_slow, 1);
        ADD_STATS(taken_slow_nested, prev != NULL);
 
        do {
+               unsigned long flags;
+
                /* clear pending */
                xen_clear_irq_pending(irq);
 
@@ -228,6 +223,12 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl
                        goto out;
                }
 
+               flags = __raw_local_save_flags();
+               if (irq_enable) {
+                       ADD_STATS(taken_slow_irqenable, 1);
+                       raw_local_irq_enable();
+               }
+
                /*
                 * Block until irq becomes pending.  If we're
                 * interrupted at this point (after the trylock but
@@ -238,13 +239,15 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl
                 * pending.
                 */
                xen_poll_irq(irq);
+
+               raw_local_irq_restore(flags);
+
                ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
        } while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
 
        kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 
 out:
-       raw_local_irq_restore(flags);
        unspinning_lock(xl, prev);
        spin_time_accum_blocked(start);
 
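The hunks above move the flags save/restore and the optional interrupt enable from around the whole slow path to just around the blocking xen_poll_irq() call, so the caller's interrupt state is only relaxed while the CPU is actually waiting for the kicker irq and is restored before the lock is retried. A simplified sketch of the slow-path loop as it reads after this change (the trylock and nesting bookkeeping from the surrounding context are abbreviated; all identifiers come from the hunks above):

	do {
		xen_clear_irq_pending(irq);

		/* ... try to grab the lock; on success, goto out ... */

		flags = __raw_local_save_flags();
		if (irq_enable) {
			ADD_STATS(taken_slow_irqenable, 1);
			raw_local_irq_enable();	/* enabled only while blocking */
		}

		xen_poll_irq(irq);		/* block until the kicker irq pends */

		raw_local_irq_restore(flags);	/* back to the caller's irq state */

		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
	} while (!xen_test_irq_pending(irq));	/* retry on spurious wakeup */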
@@ -323,8 +326,13 @@ static void xen_spin_unlock(struct raw_spinlock *lock)
        smp_wmb();              /* make sure no writes get moved after unlock */
        xl->lock = 0;           /* release lock */
 
-       /* make sure unlock happens before kick */
-       barrier();
+       /*
+        * Make sure unlock happens before checking for waiting
+        * spinners.  We need a strong barrier to enforce the
+        * write-read ordering to different memory locations, as the
+        * CPU makes no implied guarantees about their ordering.
+        */
+       mb();
 
        if (unlikely(xl->spinners))
                xen_spin_unlock_slow(xl);
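The barrier() that the last hunk replaces only constrains the compiler; it does not stop the CPU from satisfying the read of xl->spinners before the store to xl->lock becomes globally visible, which is the write-read ordering to different locations that the new comment warns about. If that reordering happens, the unlocker can see no spinners (and skip the kick) while a waiter still sees the lock held, leaving it blocked with nobody to wake it. The userspace C11 program below is only an illustration of the same pattern (the classic store-buffering litmus test); the names lock_word, spinners, unlocker and spinner are made up for the example, and the seq_cst fences stand in for the kernel's mb().

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	/* Illustrative stand-ins, not the kernel's data structures. */
	atomic_int lock_word = 1;	/* 1 = lock held */
	atomic_int spinners  = 0;	/* 1 = someone is waiting */
	int unlocker_saw_spinners, spinner_saw_lock;

	static void *unlocker(void *arg)
	{
		/* release the lock, then check for waiters to kick */
		atomic_store_explicit(&lock_word, 0, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);	/* the mb() */
		unlocker_saw_spinners =
			atomic_load_explicit(&spinners, memory_order_relaxed);
		return NULL;
	}

	static void *spinner(void *arg)
	{
		/* announce we're spinning, then look at the lock */
		atomic_store_explicit(&spinners, 1, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);
		spinner_saw_lock =
			atomic_load_explicit(&lock_word, memory_order_relaxed);
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		pthread_create(&a, NULL, unlocker, NULL);
		pthread_create(&b, NULL, spinner, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);

		/*
		 * With both fences in place the outcome
		 * unlocker_saw_spinners == 0 && spinner_saw_lock == 1
		 * (a missed wakeup) is forbidden; drop either fence and
		 * the memory model allows it.
		 */
		printf("unlocker saw spinners=%d, spinner saw lock=%d\n",
		       unlocker_saw_spinners, spinner_saw_lock);
		return 0;
	}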