[PATCH] mutex subsystem, add asm-generic/mutex-[dec|xchg|null].h implementations
[pandora-kernel.git] include/asm-generic/mutex-dec.h
/*
 * asm-generic/mutex-dec.h
 *
 * Generic implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 */
#ifndef _ASM_GENERIC_MUTEX_DEC_H
#define _ASM_GENERIC_MUTEX_DEC_H

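/*
 * The fastpaths below rely on the usual mutex count convention:
 *       1 - unlocked
 *       0 - locked, no waiters
 *      <0 - locked, possible waiters
 */
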
/**
 *  __mutex_fastpath_lock - try to take the lock by moving the count
 *                          from 1 to a 0 value
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function MUST leave the value lower than
 * 1 even when the "1" assertion wasn't true.
 */
#define __mutex_fastpath_lock(count, fail_fn)                           \
do {                                                                    \
        if (unlikely(atomic_dec_return(count) < 0))                     \
                fail_fn(count);                                         \
        else                                                            \
                smp_mb();                                               \
} while (0)

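/*
 * Usage sketch: the generic mutex core is expected to drive this fastpath
 * roughly as below, with the slowpath doing the wait-queue work
 * (simplified, debug variants omitted):
 *
 *      void __sched mutex_lock(struct mutex *lock)
 *      {
 *              might_sleep();
 *              __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 *      }
 */
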
/**
 *  __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                 from 1 to a 0 value
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 * or anything the slow path function returns.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
        if (unlikely(atomic_dec_return(count) < 0))
                return fail_fn(count);
        else {
                smp_mb();
                return 0;
        }
}

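/*
 * Usage sketch: the retval variant lets the slowpath's return value (for
 * instance -EINTR) propagate back to the caller, e.g. for an interruptible
 * lock (simplified):
 *
 *      int __sched mutex_lock_interruptible(struct mutex *lock)
 *      {
 *              might_sleep();
 *              return __mutex_fastpath_lock_retval(&lock->count,
 *                              __mutex_lock_interruptible_slowpath);
 *      }
 */
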
/**
 *  __mutex_fastpath_unlock - try to promote the count from 0 to 1
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value to
 * 1, or to set it to a value lower than 1.
 *
 * If the implementation sets it to a value lower than 1, then the
 * __mutex_slowpath_needs_to_unlock() macro needs to return 1; otherwise
 * it needs to return 0.
 */
#define __mutex_fastpath_unlock(count, fail_fn)                         \
do {                                                                    \
        smp_mb();                                                       \
        if (unlikely(atomic_inc_return(count) <= 0))                    \
                fail_fn(count);                                         \
} while (0)

#define __mutex_slowpath_needs_to_unlock()              1

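/*
 * Usage sketch: mutex_unlock() is expected to run the unlock fastpath and
 * let the slowpath wake up waiters whenever the count did not return to 1
 * (simplified):
 *
 *      void __sched mutex_unlock(struct mutex *lock)
 *      {
 *              __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 *      }
 */
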
/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 *  @count: pointer of type atomic_t
 *  @fail_fn: fallback function
 *
 * Change the count from 1 to a value lower than 1, and return 0 (failure)
 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
 * Additionally, if the value was < 0 originally, this function must not leave
 * it at 0 on failure.
 *
 * If the architecture has no effective trylock variant, it should call the
 * <fail_fn> spinlock-based trylock variant unconditionally.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
        /*
         * We have two variants here. The cmpxchg based one is the best one
         * because it never induces a false contention state.  It is included
         * here because architectures using the inc/dec algorithms over the
         * xchg ones are much more likely to support cmpxchg natively.
         *
         * If not we fall back to the spinlock based variant - that is
         * just as efficient (and simpler) as a 'destructive' probing of
         * the mutex state would be.
         */
#ifdef __HAVE_ARCH_CMPXCHG
        if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
                smp_mb();
                return 1;
        }
        return 0;
#else
        return fail_fn(count);
#endif
}
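
/*
 * Usage sketch: mutex_trylock() is expected to simply return the fastpath
 * result; without an architecture cmpxchg, <fail_fn> would be a
 * spinlock-based probe of the count (simplified):
 *
 *      int __sched mutex_trylock(struct mutex *lock)
 *      {
 *              return __mutex_fastpath_trylock(&lock->count,
 *                                              __mutex_trylock_slowpath);
 *      }
 */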

#endif