atomic.h: add atomic64 cmpxchg, xchg and add_unless to x86_64
author	Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Tue, 8 May 2007 07:34:36 +0000 (00:34 -0700)
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>
Tue, 8 May 2007 18:15:19 +0000 (11:15 -0700)
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
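
The patch below adds atomic64_cmpxchg(), atomic64_xchg(), atomic64_add_unless()
and atomic64_inc_not_zero() to the x86_64 <asm/atomic.h>, mirroring the existing
32-bit atomic_* operations, and tightens the inline-asm constraints of
atomic64_add_return().  As an illustrative sketch only (not part of the patch;
the kref64 names are hypothetical), the new primitives allow e.g. a 64-bit
reference count whose "get" fails once the count has already dropped to zero:

#include <asm/atomic.h>

struct kref64 {
	atomic64_t refcount;
};

static inline void kref64_init(struct kref64 *r)
{
	atomic64_set(&r->refcount, 1);
}

/* Take a reference only if the object is still live (count != 0). */
static inline int kref64_get_unless_zero(struct kref64 *r)
{
	return atomic64_inc_not_zero(&r->refcount);
}

/* Drop a reference; a non-zero return means the last reference is gone. */
static inline int kref64_put(struct kref64 *r)
{
	return atomic64_dec_return(&r->refcount) == 0;
}
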
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 706ca4b..80e4fdb 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -375,8 +375,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t *v)
        long __i = i;
        __asm__ __volatile__(
                LOCK_PREFIX "xaddq %0, %1;"
-               :"=r"(i)
-               :"m"(v->counter), "0"(i));
+               :"+r" (i), "+m" (v->counter)
+               : : "memory");
        return i + __i;
 }
 
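(Aside, for illustration only: the hunk above switches atomic64_add_return() to
read-write "+" constraints, so gcc knows that both the register operand and
v->counter are modified, and adds a "memory" clobber.  xaddq leaves the
counter's previous value in the register operand, which is why the new value is
reconstructed as i + __i.  A non-atomic C sketch of the same logic:)

/* Non-atomic C equivalent of atomic64_add_return(), illustration only. */
static long atomic64_add_return_sketch(long i, atomic64_t *v)
{
	long old = v->counter;	/* xaddq hands back the old value in %0 */
	v->counter = old + i;	/* ... and stores old + i into memory   */
	return old + i;		/* "i + __i" in the real function       */
}
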
@@ -388,7 +388,10 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
 #define atomic64_inc_return(v)  (atomic64_add_return(1,v))
 #define atomic64_dec_return(v)  (atomic64_sub_return(1,v))
 
-#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
@@ -402,7 +405,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
  */
 #define atomic_add_unless(v, a, u)                             \
 ({                                                             \
-       int c, old;                                             \
+       __typeof__((v)->counter) c, old;                        \
        c = atomic_read(v);                                     \
        for (;;) {                                              \
                if (unlikely(c == (u)))                         \
@@ -416,6 +419,31 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic64_add_unless(v, a, u)                           \
+({                                                             \
+       __typeof__((v)->counter) c, old;                        \
+       c = atomic64_read(v);                                   \
+       for (;;) {                                              \
+               if (unlikely(c == (u)))                         \
+                       break;                                  \
+               old = atomic64_cmpxchg((v), c, c + (a));        \
+               if (likely(old == c))                           \
+                       break;                                  \
+               c = old;                                        \
+       }                                                       \
+       c != (u);                                               \
+})
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
 /* These are x86-specific, used by some header files */
 #define atomic_clear_mask(mask, addr) \
 __asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \