X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=include%2Fasm-powerpc%2Fatomic.h;h=f3fc733758f5ccfeb27e1e3e51df7d4039608cbb;hb=e7849f16c13476288fe4fbd420975e8456c75aa0;hp=f038e33e6d48677d0da1d5150dfc0b5f1f8a6931;hpb=51e6ed23fc95c3e710d8a98717924ccb2571aa66;p=pandora-kernel.git

diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index f038e33e6d48..f3fc733758f5 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -5,17 +5,29 @@
  * PowerPC atomic operations
  */
 
-typedef struct { volatile int counter; } atomic_t;
+typedef struct { int counter; } atomic_t;
 
 #ifdef __KERNEL__
 #include <linux/compiler.h>
 #include <asm/synch.h>
 #include <asm/asm-compat.h>
+#include <asm/system.h>
 
 #define ATOMIC_INIT(i)		{ (i) }
 
-#define atomic_read(v)		((v)->counter)
-#define atomic_set(v,i)		(((v)->counter) = (i))
+static __inline__ int atomic_read(const atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
+
+	return t;
+}
+
+static __inline__ void atomic_set(atomic_t *v, int i)
+{
+	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+}
 
 static __inline__ void atomic_add(int a, atomic_t *v)
 {
@@ -165,7 +177,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 	return t;
 }
 
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
@@ -239,12 +251,23 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 
 #ifdef __powerpc64__
 
-typedef struct { volatile long counter; } atomic64_t;
+typedef struct { long counter; } atomic64_t;
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
-#define atomic64_read(v)	((v)->counter)
-#define atomic64_set(v,i)	(((v)->counter) = (i))
+static __inline__ long atomic64_read(const atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
+
+	return t;
+}
+
+static __inline__ void atomic64_set(atomic64_t *v, long i)
+{
+	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+}
 
 static __inline__ void atomic64_add(long a, atomic64_t *v)
 {
@@ -413,6 +436,42 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
 	return t;
 }
 
+#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+	long t;
+
+	__asm__ __volatile__ (
+	LWSYNC_ON_SMP
+"1:	ldarx	%0,0,%1		# atomic_add_unless\n\
+	cmpd	0,%0,%3 \n\
+	beq-	2f \n\
+	add	%0,%2,%0 \n"
+"	stdcx.	%0,0,%1 \n\
+	bne-	1b \n"
+	ISYNC_ON_SMP
+"	subf	%0,%2,%0 \n\
+2:"
+	: "=&r" (t)
+	: "r" (&v->counter), "r" (a), "r" (u)
+	: "cc", "memory");
+
+	return t != u;
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
 #endif /* __powerpc64__ */
 
 #include <asm-generic/atomic.h>
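
Not part of the patch: a minimal usage sketch of the 64-bit primitives added above (atomic64_set(), atomic64_add_unless(), atomic64_inc_not_zero()), using the common "take a reference only if the object is still live" pattern. The structure and function names (struct obj, obj_init, obj_get) are invented for illustration.

	#include <asm/atomic.h>

	struct obj {
		atomic64_t refcount;
	};

	static void obj_init(struct obj *o)
	{
		/* plain store (no barrier, no RMW); object starts with one reference */
		atomic64_set(&o->refcount, 1);
	}

	/*
	 * Take a reference only if the count has not already dropped to zero.
	 * Returns non-zero on success, zero if the object is already dead;
	 * expands to atomic64_add_unless(&o->refcount, 1, 0).
	 */
	static int obj_get(struct obj *o)
	{
		return atomic64_inc_not_zero(&o->refcount);
	}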