/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001   David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/module.h>

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	unsigned int flags;
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};
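
/*
 * Note on the counter (inferred from this file's usage, spelled out here
 * for clarity): sem->activity is 0 while the semaphore is idle, +N while
 * N readers hold it, and -1 while a single writer holds it.
 */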

/*
 * initialise the semaphore
 */
void fastcall init_rwsem(struct rw_semaphore *sem)
{
	sem->activity = 0;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}
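
/*
 * Usage sketch (illustrative only; callers normally go through the
 * generic down_read()/up_read() wrappers rather than calling the
 * __-prefixed primitives in this file directly):
 *
 *	static struct rw_semaphore my_sem;
 *
 *	init_rwsem(&my_sem);
 *	down_read(&my_sem);	(shared access)
 *	...
 *	up_read(&my_sem);
 */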

/*
 * handle the lock being released while there are processes blocked on it
 * that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken waiter blocks are removed from the list after their task
 *   pointer has been zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	int woken;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	if (!wakewrite) {
		if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
			goto out;
		goto dont_wake_writers;
	}

	/* if we are allowed to wake writers, try to grant a single write
	 * lock if there's a writer at the front of the queue
	 * - we leave the 'waiting count' incremented to signify potential
	 *   contention
	 */
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
		sem->activity = -1;
		list_del(&waiter->list);
		tsk = waiter->task;
		/* Don't touch waiter after ->task has been NULLed */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		goto out;
	}

	/* grant read locks to all of the readers at the front of the queue
	 * (every consecutive RWSEM_WAITING_FOR_READ waiter up to the first
	 * writer or the end of the list)
	 */
 dont_wake_writers:
	woken = 0;
	while (waiter->flags & RWSEM_WAITING_FOR_READ) {
		struct list_head *next = waiter->list.next;

		list_del(&waiter->list);
		tsk = waiter->task;
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		woken++;
		if (list_empty(&sem->wait_list))
			break;
		waiter = list_entry(next, struct rwsem_waiter, list);
	}

	sem->activity += woken;

 out:
	return sem;
}
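
/*
 * Worked example of the wake policy above (illustrative scenario, not
 * from the original source): with a queue of [writer, reader, reader]
 * and wakewrite set, only the writer is woken and activity becomes -1;
 * with a queue of [reader, reader, writer], both leading readers are
 * woken together, activity becomes +2, and the writer stays queued.
 */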

/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;

	sem->activity = -1;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	list_del(&waiter->list);

	tsk = waiter->task;
	smp_mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);
	return sem;
}
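
/*
 * A note on the smp_mb() before clearing ->task (our reading of the
 * handshake, added for clarity): the waiter structure lives on the
 * blocked task's stack, and that task frees it by returning as soon as
 * it observes waiter.task == NULL.  The barrier ensures the list unlink
 * and the read of waiter->task above are complete before the NULL store
 * becomes visible, so the waker never touches a stale waiter.
 */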

/*
 * get a read lock on the semaphore
 */
void fastcall __sched __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;
}
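
/*
 * How the sleep/wake handshake above works (explanatory note): the
 * on-stack waiter.task pointer doubles as the grant flag.  The waker
 * clears it only after unlinking the waiter, so once this task sees
 * NULL it may safely return and reuse its stack.  The get_task_struct()
 * / put_task_struct() pair keeps the task_struct alive in case the
 * woken task exits before the waker's wake_up_process() call reaches it.
 */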

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int fastcall __down_read_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		ret = 1;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}
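
/*
 * Usage sketch (illustrative only): the trylock form suits contexts
 * where sleeping is forbidden or would risk deadlock, e.g.:
 *
 *	if (down_read_trylock(&sem)) {
 *		... read the protected data ...
 *		up_read(&sem);
 *	} else {
 *		... fall back without blocking ...
 *	}
 */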

/*
 * get a write lock on the semaphore
 * - we increment the waiting count anyway to indicate an exclusive lock
 */
void fastcall __sched __down_write(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int fastcall __down_write_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		ret = 1;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}
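
/*
 * Note (explanatory): both trylock paths also require an empty wait
 * list, so a late arrival cannot steal the lock from waiters that are
 * already queued -- grants stay strictly FIFO.
 */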

/*
 * release a read lock on the semaphore
 */
void fastcall __up_read(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
		sem = __rwsem_wake_one_writer(sem);

	spin_unlock_irqrestore(&sem->wait_lock, flags);
}
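
/*
 * Why waking one writer suffices here (our reasoning, for clarity):
 * while readers hold the lock, any waiter at the head of the queue must
 * be a writer -- leading readers are always woken en masse, and a new
 * reader only queues when a writer is active or already waiting ahead
 * of it.  So when the last reader leaves and the list is non-empty, the
 * head is necessarily RWSEM_WAITING_FOR_WRITE.
 */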

/*
 * release a write lock on the semaphore
 */
void fastcall __up_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	sem->activity = 0;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void fastcall __downgrade_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	sem->activity = 1;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 0);

	spin_unlock_irqrestore(&sem->wait_lock, flags);
}
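
/*
 * Usage sketch (illustrative only): downgrading lets a writer publish
 * an update and continue reading it without opening a window in which
 * another writer could intervene:
 *
 *	down_write(&sem);
 *	... modify the protected data ...
 *	downgrade_write(&sem);	(now held shared)
 *	... read the data just written ...
 *	up_read(&sem);
 */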

EXPORT_SYMBOL(init_rwsem);
EXPORT_SYMBOL(__down_read);
EXPORT_SYMBOL(__down_read_trylock);
EXPORT_SYMBOL(__down_write);
EXPORT_SYMBOL(__down_write_trylock);
EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write);
EXPORT_SYMBOL(__downgrade_write);