* PowerPC atomic operations
*/
-typedef struct { volatile int counter; } atomic_t;
+typedef struct { int counter; } atomic_t;
#ifdef __KERNEL__
+#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
+#include <asm/system.h>
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ((v)->counter)
-#define atomic_set(v,i) (((v)->counter) = (i))
+static __inline__ int atomic_read(const atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
+
+ return t;
+}
+
+static __inline__ void atomic_set(atomic_t *v, int i)
+{
+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+}
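
The two accessors above are the heart of the change: `volatile` comes off the counter field, and atomic_read()/atomic_set() instead force a single 32-bit load or store through inline asm (the `%U`/`%X` output modifiers let GCC pick update or indexed addressing forms). In plain C they behave roughly like the following sketch; the `_equiv` helpers are illustrative names, not part of the patch:

/* Illustrative equivalent of the asm accessors above; not part of the patch. */
static __inline__ int atomic_read_equiv(const atomic_t *v)
{
	/* One 32-bit load the compiler may not cache or tear, as lwz gives us. */
	return *(volatile int *)&v->counter;
}

static __inline__ void atomic_set_equiv(atomic_t *v, int i)
{
	/* One 32-bit store, as stw gives us. */
	*(volatile int *)&v->counter = i;
}

Keeping the struct member itself non-volatile means code that never goes through the accessors can still be optimized normally.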
static __inline__ void atomic_add(int a, atomic_t *v)
{
PPC405_ERR77(0,%3)
" stwcx. %0,0,%3 \n\
bne- 1b"
- : "=&r" (t), "=m" (v->counter)
- : "r" (a), "r" (&v->counter), "m" (v->counter)
+ : "=&r" (t), "+m" (v->counter)
+ : "r" (a), "r" (&v->counter)
: "cc");
}
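
The constraint change repeated throughout this patch deserves a note: the old asm listed `v->counter` twice, once as an `"=m"` output and once as an `"m"` input, and nothing formally tied the two operands together, so the compiler was in principle free to address them differently. The single `"+m"` read-write operand says the same thing in one place and drops the redundant input. A minimal sketch of the idiom on a deliberately non-atomic sequence (`add_to_mem` is a hypothetical helper, illustrative only):

/* Hypothetical, illustrative only: "+m" marks *p as both read and written. */
static __inline__ void add_to_mem(int a, int *p)
{
	int t;

	__asm__ __volatile__(
"	lwz	%0,%1\n\
	add	%0,%0,%2\n\
	stw	%0,%1"
	: "=&r" (t), "+m" (*p)
	: "r" (a));
}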
PPC405_ERR77(0,%3)
" stwcx. %0,0,%3 \n\
bne- 1b"
- : "=&r" (t), "=m" (v->counter)
- : "r" (a), "r" (&v->counter), "m" (v->counter)
+ : "=&r" (t), "+m" (v->counter)
+ : "r" (a), "r" (&v->counter)
: "cc");
}
PPC405_ERR77(0,%2)
" stwcx. %0,0,%2 \n\
bne- 1b"
- : "=&r" (t), "=m" (v->counter)
- : "r" (&v->counter), "m" (v->counter)
+ : "=&r" (t), "+m" (v->counter)
+ : "r" (&v->counter)
: "cc");
}
PPC405_ERR77(0,%2)\
" stwcx. %0,0,%2\n\
bne- 1b"
- : "=&r" (t), "=m" (v->counter)
- : "r" (&v->counter), "m" (v->counter)
+ : "=&r" (t), "+m" (v->counter)
+ : "r" (&v->counter)
: "cc");
}
return t;
}
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- for (;;) { \
- if (unlikely(c == (u))) \
- break; \
- old = atomic_cmpxchg((v), c, c + (a)); \
- if (likely(old == c)) \
- break; \
- c = old; \
- } \
- c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int t;
+
+ __asm__ __volatile__ (
+ LWSYNC_ON_SMP
+"1: lwarx %0,0,%1 # atomic_add_unless\n\
+ cmpw 0,%0,%3 \n\
+ beq- 2f \n\
+ add %0,%2,%0 \n"
+	PPC405_ERR77(0,%1)
+" stwcx. %0,0,%1 \n\
+ bne- 1b \n"
+ ISYNC_ON_SMP
+" subf %0,%2,%0 \n\
+2:"
+ : "=&r" (t)
+ : "r" (&v->counter), "r" (a), "r" (u)
+ : "cc", "memory");
+
+ return t != u;
+}
+
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
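
atomic_inc_not_zero(), built on the new primitive, is the idiomatic way to take a reference only while an object is still live. A hypothetical usage sketch (`struct obj` and `obj_get` are illustrative, not from this header):

/* Hypothetical: take a reference only if the object still holds one. */
struct obj {
	atomic_t refs;
};

static int obj_get(struct obj *o)
{
	/* Non-zero on success; fails once refs has dropped to zero. */
	return atomic_inc_not_zero(&o->refs);
}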
#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
/*
* Atomically test *v and decrement if it is greater than 0.
- * The function returns the old value of *v minus 1.
+ * The function returns the old value of *v minus 1, even if
+ * *v was not decremented.
*/
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
__asm__ __volatile__(
LWSYNC_ON_SMP
"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
- addic. %0,%0,-1\n\
+ cmpwi %0,1\n\
+ addi %0,%0,-1\n\
blt- 2f\n"
PPC405_ERR77(0,%1)
" stwcx. %0,0,%1\n\
bne- 1b"
ISYNC_ON_SMP
"\n\
-2:" : "=&r" (t)
+2:" : "=&b" (t)
: "r" (&v->counter)
: "cc", "memory");
#ifdef __powerpc64__
-typedef struct { volatile long counter; } atomic64_t;
+typedef struct { long counter; } atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
-#define atomic64_read(v) ((v)->counter)
-#define atomic64_set(v,i) (((v)->counter) = (i))
+static __inline__ long atomic64_read(const atomic64_t *v)
+{
+ long t;
+
+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
+
+ return t;
+}
+
+static __inline__ void atomic64_set(atomic64_t *v, long i)
+{
+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+}
static __inline__ void atomic64_add(long a, atomic64_t *v)
{
add %0,%2,%0\n\
stdcx. %0,0,%3 \n\
bne- 1b"
- : "=&r" (t), "=m" (v->counter)
- : "r" (a), "r" (&v->counter), "m" (v->counter)
+ : "=&r" (t), "+m" (v->counter)
+ : "r" (a), "r" (&v->counter)
: "cc");
}
subf %0,%2,%0\n\
stdcx. %0,0,%3 \n\
bne- 1b"
- : "=&r" (t), "=m" (v->counter)
- : "r" (a), "r" (&v->counter), "m" (v->counter)
+ : "=&r" (t), "+m" (v->counter)
+ : "r" (a), "r" (&v->counter)
: "cc");
}
addic %0,%0,1\n\
stdcx. %0,0,%2 \n\
bne- 1b"
- : "=&r" (t), "=m" (v->counter)
- : "r" (&v->counter), "m" (v->counter)
+ : "=&r" (t), "+m" (v->counter)
+ : "r" (&v->counter)
: "cc");
}
addic %0,%0,-1\n\
stdcx. %0,0,%2\n\
bne- 1b"
- : "=&r" (t), "=m" (v->counter)
- : "r" (&v->counter), "m" (v->counter)
+ : "=&r" (t), "+m" (v->counter)
+ : "r" (&v->counter)
: "cc");
}
return t;
}
+#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+ long t;
+
+ __asm__ __volatile__ (
+ LWSYNC_ON_SMP
+"1: ldarx %0,0,%1 # atomic_add_unless\n\
+ cmpd 0,%0,%3 \n\
+ beq- 2f \n\
+ add %0,%2,%0 \n"
+" stdcx. %0,0,%1 \n\
+ bne- 1b \n"
+ ISYNC_ON_SMP
+" subf %0,%2,%0 \n\
+2:"
+ : "=&r" (t)
+ : "r" (&v->counter), "r" (a), "r" (u)
+ : "cc", "memory");
+
+ return t != u;
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
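+
With atomic64_cmpxchg() available, operations the header does not provide can be open-coded as compare-and-swap loops. A hypothetical sketch of a capped add (`atomic64_add_capped` and its ceiling policy are inventions for illustration):

/* Hypothetical: add to a 64-bit counter but never let it exceed a ceiling. */
static long atomic64_add_capped(atomic64_t *v, long a, long cap)
{
	long old, new;

	do {
		old = atomic64_read(v);
		new = old + a;
		if (new > cap)
			new = cap;
	} while (atomic64_cmpxchg(v, old, new) != old);

	return new;
}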
+
#endif /* __powerpc64__ */
#include <asm-generic/atomic.h>