sh: Fix sh4a llsc-based cmpxchg()
author Aoi Shinkai <shinkoi2005@gmail.com>
Wed, 10 Jun 2009 16:15:42 +0000 (16:15 +0000)
committer Paul Mundt <lethal@linux-sh.org>
Thu, 11 Jun 2009 06:31:55 +0000 (09:31 +0300)
This fixes up a typo in the ll/sc based cmpxchg code: the old/new pair was
swapped, which apparently went unnoticed because this path wasn't getting a
lot of testing. With that fixed up, the ll/sc code also starts using
cmpxchg() for atomic_cmpxchg() and provides its own atomic_add_unless().

Signed-off-by: Aoi Shinkai <shinkoi2005@gmail.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
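
For reference, cmpxchg(ptr, old, new) is expected to store new only when the
value currently at ptr equals old, and to always return the value it found;
callers treat "returned value == old" as success. Below is a minimal,
non-atomic C model of that contract (illustration only; the function name is
made up, and the real sh4a implementation is the ll/sc asm sequence touched
in cmpxchg-llsc.h further down):

	/* Non-atomic sketch of the cmpxchg() contract, illustration only. */
	static inline unsigned long cmpxchg_model(volatile unsigned long *ptr,
						  unsigned long old, unsigned long new)
	{
		unsigned long prev = *ptr;

		if (prev == old)
			*ptr = new;	/* the pre-fix asm stored the compare value
					 * (old) here instead of new, so a
					 * "successful" cmpxchg left memory unchanged */
		return prev;
	}
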
arch/sh/include/asm/atomic-llsc.h
arch/sh/include/asm/atomic.h
arch/sh/include/asm/cmpxchg-llsc.h
arch/sh/include/asm/spinlock.h

diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index 4b00b78..b040e1e 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -104,4 +104,31 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
        : "t");
 }
 
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+
+       return c != (u);
+}
+
 #endif /* __ASM_SH_ATOMIC_LLSC_H */
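
The kerneldoc above describes the cmpxchg loop; as a usage sketch (the
refcount naming below is hypothetical, not part of this patch),
atomic_add_unless() is what lets a caller take a reference only while an
object is still live, which is exactly how atomic_inc_not_zero() is built in
atomic.h below:

	/* Hypothetical caller: take a reference unless the count already hit 0.
	 * Returns non-zero when the increment actually happened. */
	static inline int obj_tryget(atomic_t *refcount)
	{
		return atomic_add_unless(refcount, 1, 0); /* == atomic_inc_not_zero() */
	}
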
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index 6327ffb..978b58e 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -45,7 +45,7 @@
 #define atomic_inc(v) atomic_add(1,(v))
 #define atomic_dec(v) atomic_sub(1,(v))
 
-#ifndef CONFIG_GUSA_RB
+#if !defined(CONFIG_GUSA_RB) && !defined(CONFIG_CPU_SH4A)
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
        int ret;
@@ -73,7 +73,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 
        return ret != u;
 }
-#endif
+#endif /* !CONFIG_GUSA_RB && !CONFIG_CPU_SH4A */
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/arch/sh/include/asm/cmpxchg-llsc.h b/arch/sh/include/asm/cmpxchg-llsc.h
index 0fac3da..4713666 100644
--- a/arch/sh/include/asm/cmpxchg-llsc.h
+++ b/arch/sh/include/asm/cmpxchg-llsc.h
@@ -55,7 +55,7 @@ __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
                "mov            %0, %1                          \n\t"
                "cmp/eq         %1, %3                          \n\t"
                "bf             2f                              \n\t"
-               "mov            %3, %0                          \n\t"
+               "mov            %4, %0                          \n\t"
                "2:                                             \n\t"
                "movco.l        %0, @%2                         \n\t"
                "bf             1b                              \n\t"
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index 6028356..a28c9f0 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -26,7 +26,7 @@
 #define __raw_spin_is_locked(x)                ((x)->lock <= 0)
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 #define __raw_spin_unlock_wait(x) \
-       do { cpu_relax(); } while ((x)->lock)
+       do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0)
 
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's