arch/powerpc/include/asm/rwsem.h
#ifndef _ASM_POWERPC_RWSEM_H
#define _ASM_POWERPC_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#ifdef __KERNEL__

/*
 * R/W semaphores for PPC using the stuff in lib/rwsem.c.
 * Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 */
#ifdef CONFIG_PPC64
# define RWSEM_ACTIVE_MASK              0xffffffffL
#else
# define RWSEM_ACTIVE_MASK              0x0000ffffL
#endif

#define RWSEM_UNLOCKED_VALUE            0x00000000L
#define RWSEM_ACTIVE_BIAS               0x00000001L
#define RWSEM_WAITING_BIAS              (-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

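/*
 * Worked example of the count encoding (illustrative; values shown for
 * CONFIG_PPC64, where RWSEM_ACTIVE_MASK is 0xffffffff):
 *
 *   unlocked:      count == 0x0000000000000000
 *   one reader:    count == 0x0000000000000001  (+= RWSEM_ACTIVE_READ_BIAS)
 *   two readers:   count == 0x0000000000000002
 *   one writer:    count == 0xffffffff00000001  (+= RWSEM_ACTIVE_WRITE_BIAS)
 *
 * The low RWSEM_ACTIVE_MASK bits count active lockers; a negative count
 * means a writer is active or waiters are queued, which is why the fast
 * paths below drop into the slow path when a result goes non-positive.
 */
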
struct rw_semaphore {
        long                    count;
        spinlock_t              wait_lock;
        struct list_head        wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name)                               \
{                                                               \
        RWSEM_UNLOCKED_VALUE,                                   \
        __SPIN_LOCK_UNLOCKED((name).wait_lock),                 \
        LIST_HEAD_INIT((name).wait_list)                        \
        __RWSEM_DEP_MAP_INIT(name)                              \
}

#define DECLARE_RWSEM(name)             \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
                         struct lock_class_key *key);

#define init_rwsem(sem)                                 \
        do {                                            \
                static struct lock_class_key __key;     \
                                                        \
                __init_rwsem((sem), #sem, &__key);      \
        } while (0)

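/*
 * Example usage (illustrative only; callers use the generic wrappers from
 * <linux/rwsem.h> rather than the __ primitives below):
 *
 *      static DECLARE_RWSEM(example_sem);
 *
 * or, for a dynamically allocated semaphore:
 *
 *      init_rwsem(&some_object->sem);
 *
 * "example_sem" and "some_object" are hypothetical names made up for
 * this example.
 */
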
/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
        /*
         * A non-positive result after the increment means a writer is
         * active or waiters are queued; fall back to the slow path.
         */
        if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
                rwsem_down_read_failed(sem);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        long tmp;

        /*
         * Retry the cmpxchg while the count stays non-negative, i.e.
         * while no writer is active or waiting.
         */
        while ((tmp = sem->count) >= 0) {
                if (tmp == cmpxchg(&sem->count, tmp,
                                   tmp + RWSEM_ACTIVE_READ_BIAS)) {
                        return 1;
                }
        }
        return 0;
}

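/*
 * Illustrative read-side caller pattern (a sketch; kernel code calls the
 * generic down_read()/up_read() from <linux/rwsem.h>, which wrap the
 * primitives above):
 *
 *      down_read(&example_sem);
 *      ... read shared data ...
 *      up_read(&example_sem);
 *
 * "example_sem" is the hypothetical semaphore from the earlier example.
 */
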
/*
 * lock for writing
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
        long tmp;

        tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
                                     (atomic_long_t *)&sem->count);
        /*
         * Anything other than a clean unlocked -> write-locked transition
         * means there are active lockers or waiters; take the slow path.
         */
        if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                rwsem_down_write_failed(sem);
}

static inline void __down_write(struct rw_semaphore *sem)
{
        __down_write_nested(sem, 0);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        long tmp;

        /* a single cmpxchg: only succeeds if the semaphore was idle */
        tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
        return tmp == RWSEM_UNLOCKED_VALUE;
}

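/*
 * Worked write-path arithmetic (illustrative; follows directly from the
 * bias macros above):
 *
 *   uncontended:  0 + RWSEM_ACTIVE_WRITE_BIAS == RWSEM_ACTIVE_WRITE_BIAS
 *                 -> fast path succeeds
 *   one reader:   1 + RWSEM_ACTIVE_WRITE_BIAS != RWSEM_ACTIVE_WRITE_BIAS
 *                 -> slow path
 *
 * so a writer takes the fast path only when it observes the exact
 * "was unlocked, now write-locked" transition.
 */
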
/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
        long tmp;

        tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
        /*
         * Wake waiters only when waiters are queued (count negative)
         * and this was the last active locker (low bits now zero).
         */
        if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
                rwsem_wake(sem);
}

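/*
 * Worked release trace (illustrative; assumes the lib/rwsem.c slow path
 * leaves RWSEM_WAITING_BIAS in the count while a waiter sleeps): with one
 * reader holding the lock and one writer queued, count is
 * RWSEM_WAITING_BIAS + 1.  The reader's decrement leaves exactly
 * RWSEM_WAITING_BIAS, i.e. tmp < -1 with (tmp & RWSEM_ACTIVE_MASK) == 0,
 * so __up_read() calls rwsem_wake() to hand the lock to the writer.
 */
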
/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
        /*
         * A negative result after removing the write bias means waiters
         * queued up while we held the lock.
         */
        if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
                                 (atomic_long_t *)&sem->count) < 0))
                rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
        atomic_long_add(delta, (atomic_long_t *)&sem->count);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        long tmp;

        /*
         * Turn the active-writer bias into a single active-reader bias;
         * a negative result means waiters are still queued.
         */
        tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
                                     (atomic_long_t *)&sem->count);
        if (tmp < 0)
                rwsem_downgrade_wake(sem);
}

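/*
 * Worked downgrade arithmetic (illustrative): an uncontended write lock
 * holds count == RWSEM_ACTIVE_WRITE_BIAS == RWSEM_WAITING_BIAS + 1, so
 * adding -RWSEM_WAITING_BIAS leaves count == 1, exactly one active
 * reader.  If waiters queued up in the meantime the result stays
 * negative and rwsem_downgrade_wake() lets queued readers in as well.
 */
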
/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
        return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
        return sem->count != 0;
}

#endif  /* __KERNEL__ */
#endif  /* _ASM_POWERPC_RWSEM_H */