mutex: speed up generic mutex implementations
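
The fastpath helpers below are converted from macros to static inline functions, and the explicit smp_mb() calls around the atomic operations are dropped: value-returning atomics such as atomic_dec_return(), atomic_inc_return() and atomic_cmpxchg() are already required to act as full memory barriers on SMP, so the extra barriers only added cost to the uncontended path.

For context, here is a rough sketch (not part of this patch) of how the generic mutex core in kernel/mutex.c drives these helpers, passing the slowpath in as fail_fn:

	void __sched mutex_lock(struct mutex *lock)
	{
		might_sleep();
		/*
		 * The 1 -> 0 transition of lock->count is the uncontended
		 * fastpath; any other original value diverts into the
		 * slowpath via the fail_fn argument.
		 */
		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	}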
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index 40c6d1f..f104af7 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -1,5 +1,5 @@
 /*
- * asm-generic/mutex-dec.h
+ * include/asm-generic/mutex-dec.h
  *
  * Generic implementation of the mutex fastpath, based on atomic
  * decrement/increment.
  * it wasn't 1 originally. This function MUST leave the value lower than
  * 1 even when the "1" assertion wasn't true.
  */
-#define __mutex_fastpath_lock(count, fail_fn)                          \
-do {                                                                   \
-       if (unlikely(atomic_dec_return(count) < 0))                     \
-               fail_fn(count);                                         \
-       else                                                            \
-               smp_mb();                                               \
-} while (0)
+static inline void
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+       if (unlikely(atomic_dec_return(count) < 0))
+               fail_fn(count);
+}
 
 /**
  *  __mutex_fastpath_lock_retval - try to take the lock by moving the count
@@ -40,10 +39,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
        if (unlikely(atomic_dec_return(count) < 0))
                return fail_fn(count);
-       else {
-               smp_mb();
-               return 0;
-       }
+       return 0;
 }
 
 /**
@@ -59,12 +55,12 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
  * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
  * to return 0 otherwise.
  */
-#define __mutex_fastpath_unlock(count, fail_fn)                                \
-do {                                                                   \
-       smp_mb();                                                       \
-       if (unlikely(atomic_inc_return(count) <= 0))                    \
-               fail_fn(count);                                         \
-} while (0)
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+       if (unlikely(atomic_inc_return(count) <= 0))
+               fail_fn(count);
+}
 
 #define __mutex_slowpath_needs_to_unlock()             1
 
@@ -86,25 +82,9 @@ do {                                                                 \
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-       /*
-        * We have two variants here. The cmpxchg based one is the best one
-        * because it never induce a false contention state.  It is included
-        * here because architectures using the inc/dec algorithms over the
-        * xchg ones are much more likely to support cmpxchg natively.
-        *
-        * If not we fall back to the spinlock based variant - that is
-        * just as efficient (and simpler) as a 'destructive' probing of
-        * the mutex state would be.
-        */
-#ifdef __HAVE_ARCH_CMPXCHG
-       if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
-               smp_mb();
+       if (likely(atomic_cmpxchg(count, 1, 0) == 1))
                return 1;
-       }
        return 0;
-#else
-       return fail_fn(count);
-#endif
 }
 
 #endif
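
For reference (not part of this patch), an architecture opts into this generic fastpath simply by including it from its own asm/mutex.h, e.g.:

	/* arch/<arch>/include/asm/mutex.h -- illustrative only */
	#include <asm-generic/mutex-dec.h>

Architectures whose atomics are cheaper with xchg() can use asm-generic/mutex-xchg.h instead; both headers provide the same __mutex_fastpath_*() interface consumed by kernel/mutex.c.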