[PATCH] spinlock consolidation
diff --git a/include/asm-ppc64/spinlock.h b/include/asm-ppc64/spinlock.h
index acd1156..14cb895 100644
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
  */
 #include <linux/config.h>
 #include <asm/paca.h>
 #include <asm/hvcall.h>
 #include <asm/iSeries/HvCall.h>
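
For reference, the raw lock types used below come from the new
asm/spinlock_types.h mentioned in the comment above.  A minimal sketch of
what it is expected to contain, with field names taken from their uses in
this patch (the unlocked-initializer names are assumed):

    typedef struct {
            volatile unsigned int slock;
    } raw_spinlock_t;

    #define __RAW_SPIN_LOCK_UNLOCKED        { 0 }

    typedef struct {
            volatile signed int lock;
    } raw_rwlock_t;

    #define __RAW_RW_LOCK_UNLOCKED          { 0 }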
 
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
+#define __raw_spin_is_locked(x)                ((x)->slock != 0)
 
-typedef struct {
-       volatile signed int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
+/*
+ * This returns the old value in the lock, so we succeeded
+ * in getting the lock if the return value is 0.
+ */
+static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
+{
+       unsigned long tmp, tmp2;
 
-#ifdef __KERNEL__
-#define SPIN_LOCK_UNLOCKED     (spinlock_t) { 0 }
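+       /*
+        * The value stored into a held lock is not 1 but the per-cpu
+        * lock_token read from the paca (r13 is the paca pointer on
+        * ppc64), so the lock word also records which cpu holds it;
+        * __spin_yield() below relies on that.  lwarx/stwcx. retry the
+        * atomic test-and-set, and isync orders the critical section
+        * after the successful acquisition.
+        */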
+       __asm__ __volatile__(
+"      lwz             %1,%3(13)               # __spin_trylock\n\
+1:     lwarx           %0,0,%2\n\
+       cmpwi           0,%0,0\n\
+       bne-            2f\n\
+       stwcx.          %1,0,%2\n\
+       bne-            1b\n\
+       isync\n\
+2:"    : "=&r" (tmp), "=&r" (tmp2)
+       : "r" (&lock->slock), "i" (offsetof(struct paca_struct, lock_token))
+       : "cr0", "memory");
 
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
+       return tmp;
+}
 
-static __inline__ void _raw_spin_unlock(spinlock_t *lock)
+static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
 {
-       __asm__ __volatile__("lwsync    # spin_unlock": : :"memory");
-       lock->lock = 0;
+       return __spin_trylock(lock) == 0;
 }
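
With the consolidation, the generic spinlock_t wraps this raw type and the
generic locking code calls down into the __raw_* hooks above.  A minimal
sketch of that caller side (field and helper names assumed; the real code
lives in linux/spinlock*.h and kernel/spinlock.c):

    typedef struct {
            raw_spinlock_t raw_lock;        /* plus debug/preempt fields */
    } spinlock_t;

    static inline int example_spin_trylock(spinlock_t *lock)
    {
            preempt_disable();
            if (__raw_spin_trylock(&lock->raw_lock))
                    return 1;               /* got it, stay non-preemptible */
            preempt_enable();
            return 0;
    }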
 
 /*
@@ -64,44 +70,15 @@ static __inline__ void _raw_spin_unlock(spinlock_t *lock)
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 /* We only yield to the hypervisor if we are in shared processor mode */
 #define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc)
-extern void __spin_yield(spinlock_t *lock);
-extern void __rw_yield(rwlock_t *lock);
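+/*
+ * __spin_yield()/__rw_yield() live in arch/ppc64/lib/locks.c: they use the
+ * token stored in the lock word to find the virtual cpu that holds the
+ * lock and, if that cpu is not currently dispatched, confer our cycles to
+ * it (via H_CONFER on pSeries, or the HvCall equivalent on iSeries).
+ */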
+extern void __spin_yield(raw_spinlock_t *lock);
+extern void __rw_yield(raw_rwlock_t *lock);
 #else /* SPLPAR || ISERIES */
 #define __spin_yield(x)        barrier()
 #define __rw_yield(x)  barrier()
 #define SHARED_PROCESSOR       0
 #endif
-extern void spin_unlock_wait(spinlock_t *lock);
-
-/*
- * This returns the old value in the lock, so we succeeded
- * in getting the lock if the return value is 0.
- */
-static __inline__ unsigned long __spin_trylock(spinlock_t *lock)
-{
-       unsigned long tmp, tmp2;
-
-       __asm__ __volatile__(
-"      lwz             %1,%3(13)               # __spin_trylock\n\
-1:     lwarx           %0,0,%2\n\
-       cmpwi           0,%0,0\n\
-       bne-            2f\n\
-       stwcx.          %1,0,%2\n\
-       bne-            1b\n\
-       isync\n\
-2:"    : "=&r" (tmp), "=&r" (tmp2)
-       : "r" (&lock->lock), "i" (offsetof(struct paca_struct, lock_token))
-       : "cr0", "memory");
-
-       return tmp;
-}
-
-static int __inline__ _raw_spin_trylock(spinlock_t *lock)
-{
-       return __spin_trylock(lock) == 0;
-}
 
-static void __inline__ _raw_spin_lock(spinlock_t *lock)
+static void __inline__ __raw_spin_lock(raw_spinlock_t *lock)
 {
        while (1) {
                if (likely(__spin_trylock(lock) == 0))
@@ -110,12 +87,12 @@ static void __inline__ _raw_spin_lock(spinlock_t *lock)
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __spin_yield(lock);
-               } while (unlikely(lock->lock != 0));
+               } while (unlikely(lock->slock != 0));
                HMT_medium();
        }
 }
 
-static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
+static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
        unsigned long flags_dis;
 
@@ -128,12 +105,20 @@ static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flag
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __spin_yield(lock);
-               } while (unlikely(lock->lock != 0));
+               } while (unlikely(lock->slock != 0));
                HMT_medium();
                local_irq_restore(flags_dis);
        }
 }
 
+static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
+{
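+       /* lwsync is the release barrier here: all loads and stores inside
+        * the critical section are ordered before the store that drops
+        * the lock. */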
+       __asm__ __volatile__("lwsync    # __raw_spin_unlock": : :"memory");
+       lock->slock = 0;
+}
+
+extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
+
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
@@ -144,24 +129,15 @@ static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flag
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
  */
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
 
-#define rwlock_init(x)         do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-#define read_can_lock(rw)      ((rw)->lock >= 0)
-#define write_can_lock(rw)     (!(rw)->lock)
-
-static __inline__ void _raw_write_unlock(rwlock_t *rw)
-{
-       __asm__ __volatile__("lwsync            # write_unlock": : :"memory");
-       rw->lock = 0;
-}
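+/*
+ * The lock word encodes the state directly: 0 means unlocked, a positive
+ * value is the number of readers holding the lock, and a negative value
+ * means a writer holds it.
+ */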
+#define __raw_read_can_lock(rw)                ((rw)->lock >= 0)
+#define __raw_write_can_lock(rw)       (!(rw)->lock)
 
 /*
  * This returns the old value in the lock + 1,
  * so we got a read lock if the return value is > 0.
  */
-static long __inline__ __read_trylock(rwlock_t *rw)
+static long __inline__ __read_trylock(raw_rwlock_t *rw)
 {
        long tmp;
 
@@ -180,45 +156,11 @@ static long __inline__ __read_trylock(rwlock_t *rw)
        return tmp;
 }
 
-static int __inline__ _raw_read_trylock(rwlock_t *rw)
-{
-       return __read_trylock(rw) > 0;
-}
-
-static void __inline__ _raw_read_lock(rwlock_t *rw)
-{
-       while (1) {
-               if (likely(__read_trylock(rw) > 0))
-                       break;
-               do {
-                       HMT_low();
-                       if (SHARED_PROCESSOR)
-                               __rw_yield(rw);
-               } while (unlikely(rw->lock < 0));
-               HMT_medium();
-       }
-}
-
-static void __inline__ _raw_read_unlock(rwlock_t *rw)
-{
-       long tmp;
-
-       __asm__ __volatile__(
-       "eieio                          # read_unlock\n\
-1:     lwarx           %0,0,%1\n\
-       addic           %0,%0,-1\n\
-       stwcx.          %0,0,%1\n\
-       bne-            1b"
-       : "=&r"(tmp)
-       : "r"(&rw->lock)
-       : "cr0", "memory");
-}
-
 /*
  * This returns the old value in the lock,
  * so we got the write lock if the return value is 0.
  */
-static __inline__ long __write_trylock(rwlock_t *rw)
+static __inline__ long __write_trylock(raw_rwlock_t *rw)
 {
        long tmp, tmp2;
 
@@ -237,12 +179,21 @@ static __inline__ long __write_trylock(rwlock_t *rw)
        return tmp;
 }
 
-static int __inline__ _raw_write_trylock(rwlock_t *rw)
+static void __inline__ __raw_read_lock(raw_rwlock_t *rw)
 {
-       return __write_trylock(rw) == 0;
+       while (1) {
+               if (likely(__read_trylock(rw) > 0))
+                       break;
+               do {
+                       HMT_low();
+                       if (SHARED_PROCESSOR)
+                               __rw_yield(rw);
+               } while (unlikely(rw->lock < 0));
+               HMT_medium();
+       }
 }
 
-static void __inline__ _raw_write_lock(rwlock_t *rw)
+static void __inline__ __raw_write_lock(raw_rwlock_t *rw)
 {
        while (1) {
                if (likely(__write_trylock(rw) == 0))
@@ -256,5 +207,35 @@ static void __inline__ _raw_write_lock(rwlock_t *rw)
        }
 }
 
-#endif /* __KERNEL__ */
+static int __inline__ __raw_read_trylock(raw_rwlock_t *rw)
+{
+       return __read_trylock(rw) > 0;
+}
+
+static int __inline__ __raw_write_trylock(raw_rwlock_t *rw)
+{
+       return __write_trylock(rw) == 0;
+}
+
+static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
+{
+       long tmp;
+
+       __asm__ __volatile__(
+       "eieio                          # read_unlock\n\
+1:     lwarx           %0,0,%1\n\
+       addic           %0,%0,-1\n\
+       stwcx.          %0,0,%1\n\
+       bne-            1b"
+       : "=&r"(tmp)
+       : "r"(&rw->lock)
+       : "cr0", "memory");
+}
+
+static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
+{
+       __asm__ __volatile__("lwsync    # write_unlock": : :"memory");
+       rw->lock = 0;
+}
+
 #endif /* __ASM_SPINLOCK_H */