/*
 * rwsem.h: R/W semaphores implemented using CAS
 *
 * Written by David S. Miller (davem@redhat.com), 2001.
 * Derived from asm-i386/rwsem.h
 */
#ifndef _SPARC64_RWSEM_H
#define _SPARC64_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/spinlock.h>

struct rwsem_waiter;

struct rw_semaphore {
	signed long			count;
#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_ACTIVE_MASK		0xffffffffL
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t			wait_lock;
	struct list_head		wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map		dep_map;
#endif
};
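
/*
 * The count field packs the whole lock state into one 64-bit word so
 * the fast paths need only a single atomic operation.  The low 32 bits
 * (RWSEM_ACTIVE_MASK) count active holders; the presence of queued
 * waiters is flagged by RWSEM_WAITING_BIAS, which borrows from the
 * upper half.  Illustrative values (the exact bookkeeping is done by
 * the generic slow path in lib/rwsem.c):
 *
 *	unlocked:		count == 0x0000000000000000
 *	one reader:		count == 0x0000000000000001
 *	two readers:		count == 0x0000000000000002
 *	one writer:		count == 0xffffffff00000001
 *				(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 *	writer + one waiter:	count == 0xfffffffe00000001
 */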

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
  LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
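
/*
 * Example (illustrative, not part of this header): a semaphore with
 * static storage duration, initialized at compile time.  "my_sem" is a
 * hypothetical name.
 *
 *	static DECLARE_RWSEM(my_sem);
 */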

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)
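
/*
 * Example (illustrative): run-time initialization for a semaphore
 * embedded in dynamically allocated storage; "struct foo" and "fp" are
 * hypothetical.
 *
 *	struct foo {
 *		struct rw_semaphore sem;
 *	};
 *
 *	init_rwsem(&fp->sem);
 */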

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	/* A non-positive result means a writer is active or queued,
	 * so take the slow path.
	 */
	if (unlikely(atomic64_inc_return((atomic64_t *)(&sem->count)) <= 0L))
		rwsem_down_read_failed(sem);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	/* Retry the cmpxchg for as long as no writer is active or
	 * queued (count >= 0); any negative count means give up.
	 */
	while ((tmp = sem->count) >= 0L) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}
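
/*
 * Callers normally reach the trylock above through down_read_trylock()
 * in linux/rwsem.h.  A sketch of the non-blocking pattern (hypothetical
 * caller code):
 *
 *	if (down_read_trylock(&fp->sem)) {
 *		... read-side critical section ...
 *		up_read(&fp->sem);
 *	}
 */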

/*
 * lock for writing
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	long tmp;

	tmp = atomic64_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				  (atomic64_t *)(&sem->count));
	/* The sum equals RWSEM_ACTIVE_WRITE_BIAS only if the semaphore
	 * was previously unlocked; anything else means contention.
	 */
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}

static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}
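
/*
 * Unlike the read trylock, a single cmpxchg suffices here: a write
 * lock can only be taken when count is exactly RWSEM_UNLOCKED_VALUE,
 * so there is no other starting value from which a retry could
 * succeed.
 */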

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic64_dec_return((atomic64_t *)(&sem->count));
	/* A negative count with no active holders left means waiters
	 * are queued and the last active locker must wake them.
	 */
	if (unlikely(tmp < -1L && (tmp & RWSEM_ACTIVE_MASK) == 0L))
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	/* Dropping the write bias leaves a negative count only when
	 * waiters are still queued; wake them up.
	 */
	if (unlikely(atomic64_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
					 (atomic64_t *)(&sem->count)) < 0L))
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	atomic64_add(delta, (atomic64_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	/* Adding -RWSEM_WAITING_BIAS turns the active write bias into a
	 * single active read bias; a negative result means waiters are
	 * still queued and the readers among them can now be woken.
	 */
	tmp = atomic64_add_return(-RWSEM_WAITING_BIAS, (atomic64_t *)(&sem->count));
	if (tmp < 0L)
		rwsem_downgrade_wake(sem);
}
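
/*
 * A sketch of the downgrade pattern through the linux/rwsem.h wrappers
 * (hypothetical caller code):
 *
 *	down_write(&fp->sem);
 *	... modify the protected data ...
 *	downgrade_write(&fp->sem);	ends up in __downgrade_write()
 *	... continue with read-only access ...
 *	up_read(&fp->sem);
 */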

/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return (sem->count != 0);
}

#endif /* __KERNEL__ */

#endif /* _SPARC64_RWSEM_H */