[PATCH] powerpc: Extends HCALL interface for InfiniBand usage
[pandora-kernel.git] / include / asm-powerpc / atomic.h
index ec4b144..bb3c0ab 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -8,6 +8,7 @@
 typedef struct { volatile int counter; } atomic_t;
 
 #ifdef __KERNEL__
+#include <linux/compiler.h>
 #include <asm/synch.h>
 #include <asm/asm-compat.h>
 
@@ -36,7 +37,7 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
        int t;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    lwarx   %0,0,%2         # atomic_add_return\n\
        add     %0,%1,%0\n"
        PPC405_ERR77(0,%2)
@@ -72,7 +73,7 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
        int t;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    lwarx   %0,0,%2         # atomic_sub_return\n\
        subf    %0,%1,%0\n"
        PPC405_ERR77(0,%2)
@@ -106,7 +107,7 @@ static __inline__ int atomic_inc_return(atomic_t *v)
        int t;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    lwarx   %0,0,%1         # atomic_inc_return\n\
        addic   %0,%0,1\n"
        PPC405_ERR77(0,%1)
@@ -150,7 +151,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
        int t;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    lwarx   %0,0,%1         # atomic_dec_return\n\
        addic   %0,%0,-1\n"
        PPC405_ERR77(0,%1)
@@ -165,6 +166,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 }
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
  * atomic_add_unless - add unless the number is a given value
@@ -175,20 +177,29 @@ static __inline__ int atomic_dec_return(atomic_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u)                                                      \
-({                                                                                                                      \
-          int c, old;                                                                                   \
-          c = atomic_read(v);                                                                   \
-          for (;;) {                                                                                     \
-                          if (unlikely(c == (u)))                                               \
-                                          break;                                                                 \
-                          old = atomic_cmpxchg((v), c, c + (a));                 \
-                          if (likely(old == c))                                                   \
-                                          break;                                                                 \
-                          c = old;                                                                             \
-          }                                                                                                       \
-          c != (u);                                                                                       \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int t;
+
+       __asm__ __volatile__ (
+       LWSYNC_ON_SMP
+"1:    lwarx   %0,0,%1         # atomic_add_unless\n\
+       cmpw    0,%0,%3 \n\
+       beq-    2f \n\
+       add     %0,%2,%0 \n"
+       PPC405_ERR77(0,%2)
+"      stwcx.  %0,0,%1 \n\
+       bne-    1b \n"
+       ISYNC_ON_SMP
+"      subf    %0,%2,%0 \n\
+2:"
+       : "=&r" (t)
+       : "r" (&v->counter), "r" (a), "r" (u)
+       : "cc", "memory");
+
+       return t != u;
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_sub_and_test(a, v)      (atomic_sub_return((a), (v)) == 0)
@@ -203,7 +214,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
        int t;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    lwarx   %0,0,%1         # atomic_dec_if_positive\n\
        addic.  %0,%0,-1\n\
        blt-    2f\n"
@@ -252,7 +263,7 @@ static __inline__ long atomic64_add_return(long a, atomic64_t *v)
        long t;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    ldarx   %0,0,%2         # atomic64_add_return\n\
        add     %0,%1,%0\n\
        stdcx.  %0,0,%2 \n\
@@ -286,7 +297,7 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
        long t;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    ldarx   %0,0,%2         # atomic64_sub_return\n\
        subf    %0,%1,%0\n\
        stdcx.  %0,0,%2 \n\
@@ -318,7 +329,7 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
        long t;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    ldarx   %0,0,%1         # atomic64_inc_return\n\
        addic   %0,%0,1\n\
        stdcx.  %0,0,%1 \n\
@@ -360,7 +371,7 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
        long t;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    ldarx   %0,0,%1         # atomic64_dec_return\n\
        addic   %0,%0,-1\n\
        stdcx.  %0,0,%1\n\
@@ -385,7 +396,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
        long t;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    ldarx   %0,0,%1         # atomic64_dec_if_positive\n\
        addic.  %0,%0,-1\n\
        blt-    2f\n\
@@ -402,5 +413,6 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
 
 #endif /* __powerpc64__ */
 
+#include <asm-generic/atomic.h>
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_ATOMIC_H_ */
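
Note (not part of the patch): the new inline atomic_add_unless() above returns non-zero only when the addition actually took place, and atomic_inc_not_zero(v) is simply atomic_add_unless(v, 1, 0). A minimal usage sketch of the usual "take a reference only while the object is still live" pattern follows; the struct and function names are hypothetical and only illustrate how a caller would use these primitives.

/*
 * Illustrative sketch only; obj, obj_tryget() and obj_put() are
 * hypothetical names, not part of this patch.
 */
struct obj {
	atomic_t refcount;		/* drops to 0 when the object is dying */
	/* ... payload ... */
};

/* Take a reference only if the count has not already reached zero. */
static inline int obj_tryget(struct obj *p)
{
	return atomic_inc_not_zero(&p->refcount);	/* non-zero on success */
}

/* Matching release path for the sketch above. */
static inline void obj_put(struct obj *p)
{
	if (atomic_dec_and_test(&p->refcount))
		kfree(p);			/* last reference gone */
}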