x86: ticket spin locks: fix asm constraints
author Jan Beulich <jbeulich@novell.com>
Fri, 5 Sep 2008 12:26:39 +0000 (13:26 +0100)
committer Ingo Molnar <mingo@elte.hu>
Fri, 5 Sep 2008 15:04:08 +0000 (17:04 +0200)
The outputs of these asm()s get written before the asm's last reference to
the "+m" operand (lock->slock), whose address is itself held in a register,
so they need the earlyclobber ('&') modifier. 'inc' in the NR_CPUS >= 256
lock path is never accessed as a byte or high-byte register, so the plain
'r' constraint suffices there instead of 'Q'; conversely, 'new' in the
second trylock variant is accessed via %b1 and therefore must come from the
byte-addressable 'q' class rather than 'r'.

In addition to these changes, I doubt the 'volatile' on all the ticket
lock asm()-s is really necessary.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
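
For background: an earlyclobber ('&') modifier tells GCC that an asm output
is written before the asm has finished consuming its inputs, so the output
must not share a register with any input, including the registers that hold
a memory operand's address. A contrived, self-contained illustration of the
failure mode, with made-up names that have nothing to do with this patch:

    /* Demo: %0 is zeroed before %1 is read.  With a plain "=r"
     * constraint GCC may put 'out' and 'x' in the same register,
     * and the movl then destroys the input; "=&r" forbids that.
     */
    static inline int add_to_zero(int x)
    {
            int out;

            asm ("movl $0, %0\n\t"      /* writes %0 while %1 is live */
                 "addl %1, %0"          /* reads %1 after the write   */
                 : "=&r" (out)          /* '&': no register sharing   */
                 : "r" (x)
                 : "cc");
            return out;                 /* always returns x           */
    }

Built with "=r" instead of "=&r", GCC would be entitled to give 'out' and
'x' the same register, and add_to_zero(42) could then return 0 instead of 42.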
include/asm-x86/spinlock.h

index 93adae3..acd9bdd 100644
@@ -101,7 +101,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
                     "1:"
                     "sete %b1\n\t"
                     "movzbl %b1,%0\n\t"
-                    : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
+                    : "=&a" (tmp), "=&Q" (new), "+m" (lock->slock)
                     :
                     : "memory", "cc");
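
In context, the NR_CPUS < 256 trylock reads as below once this hunk is
applied; the lines outside the hunk are reconstructed from the surrounding
kernel source and should be treated as approximate. %1 ('new') is written by
the movw/incb pair while the asm still has one use of %2 ('lock->slock')
ahead of it, the lock ; cmpxchgw. Without the '&', GCC may hand %1 the very
register that holds the address of %2, and the write to 'new' would then
corrupt that address before cmpxchgw dereferences it. ('Q' rather than plain
'q' because incb %h1 needs the high-byte form, which on x86-64 only a/b/c/d
provide.)

    static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
    {
            int tmp;
            short new;

            asm volatile("movw %2,%w0\n\t"            /* load ticket pair   */
                         "cmpb %h0,%b0\n\t"           /* owner == next?     */
                         "jne 1f\n\t"
                         "movw %w0,%w1\n\t"           /* %1 written here... */
                         "incb %h1\n\t"               /* bump 'next' byte   */
                         "lock ; cmpxchgw %w1,%2\n\t" /* ...%2 used after   */
                         "1:"
                         "sete %b1\n\t"
                         "movzbl %b1,%0\n\t"
                         : "=&a" (tmp), "=&Q" (new), "+m" (lock->slock)
                         :
                         : "memory", "cc");

            return tmp;
    }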
 
@@ -146,7 +146,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
                     /* don't need lfence here, because loads are in-order */
                     "jmp 1b\n"
                     "2:"
-                    : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
+                    : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
                     :
                     : "memory", "cc");
 }
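
This hunk carries two independent fixes. 'inc' is only ever used as a 32-bit
(%0) or 16-bit (%w0) operand here, so the 'Q' class (eax/ebx/ecx/edx, the
registers with addressable high bytes) was needlessly tight and plain 'r'
suffices. 'tmp' (%2), by contrast, is written by the first movzwl while %0
and the address of %1 are still live, so it does need the earlyclobber: if
tmp's register also held the address of lock->slock, the bottom-of-loop
reload would read through garbage. A rough reconstruction of the fixed
function (lines outside the hunk are assumptions from the surrounding
source):

    static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
    {
            int inc = 0x00010000;       /* +1 in the 'next' half      */
            int tmp;

            asm volatile("lock ; xaddl %0, %1\n" /* take a ticket     */
                         "movzwl %w0, %2\n\t"    /* %2 written here...*/
                         "shrl $16, %0\n\t"      /* %0 := our ticket  */
                         "1:\t"
                         "cmpl %0, %2\n\t"       /* owner reached us? */
                         "je 2f\n\t"
                         "rep ; nop\n\t"         /* pause while spinning */
                         "movzwl %1, %2\n\t"     /* ...%1's address   */
                         /* don't need lfence here, because loads are
                            in-order */
                         "jmp 1b\n"              /*    still needed here */
                         "2:"
                         : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
                         :
                         : "memory", "cc");
    }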
@@ -166,7 +166,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
                     "1:"
                     "sete %b1\n\t"
                     "movzbl %b1,%0\n\t"
-                    : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
+                    : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
                     :
                     : "memory", "cc");
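
The NR_CPUS >= 256 trylock keeps its whole ticket pair in the int 'new', but
tests the cmpxchg result through its low byte, "sete %b1". On 32-bit x86 the
'r' class includes %esi and %edi, which have no byte subregister, so the
constraint must be 'q' (one of a/b/c/d); the '&' is needed for the same
address-register reason as in the first hunk. A minimal sketch of the 'q'
requirement, with an invented helper that is not part of the kernel:

    /* Hypothetical helper: returns 1 if v is zero.  The result goes
     * through the low byte of %0, so %0 must come from the 'q' class.
     */
    static inline int is_zero(unsigned int v)
    {
            int ret;

            asm ("cmpl $0, %1\n\t"      /* input consumed first...    */
                 "sete %b0\n\t"         /* ...so no '&' needed here;  */
                 "movzbl %b0, %0"       /* %b0 exists only for a-d    */
                 : "=q" (ret)           /* 'q', because of %b0        */
                 : "r" (v)
                 : "cc");
            return ret;
    }

Here no earlyclobber is required because %1 is fully read, by cmpl, before
%b0 is first written; in the trylock above, 'new' is written well before the
asm's final use of the memory operand, hence "=&q" rather than just "=q".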