kernel/wait.c
/*
 * Generic waiting primitives.
 *
 * (C) 2004 William Irwin, Oracle
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>

void init_waitqueue_head(wait_queue_head_t *q)
{
        spin_lock_init(&q->lock);
        INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(init_waitqueue_head);

void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        wait->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue_tail(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);
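
/*
 * Note: exclusive waiters are queued at the tail.  A wake-one wakeup
 * (wake_up() with nr_exclusive == 1 in __wake_up_common()) therefore
 * wakes every non-exclusive waiter at the head of the queue but only
 * one exclusive waiter at the tail.
 */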

void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __remove_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue(q, wait);
        /*
         * don't alter the task state if this is just going to
         * queue an async wait queue callback
         */
        if (is_sync_wait(wait))
                set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        wait->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue_tail(q, wait);
        /*
         * don't alter the task state if this is just going to
         * queue an async wait queue callback
         */
        if (is_sync_wait(wait))
                set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

/**
 * finish_wait - clean up after waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        /*
         * We can check for list emptiness outside the lock
         * IFF:
         *  - we use the "careful" check that verifies both
         *    the next and prev pointers, so that there cannot
         *    be any half-pending updates in progress on other
         *    CPUs that we haven't seen yet (and that might
         *    still change the stack area).
         * and
         *  - all other users take the lock (i.e. we can only
         *    have _one_ other CPU that looks at or modifies
         *    the list).
         */
        if (!list_empty_careful(&wait->task_list)) {
                spin_lock_irqsave(&q->lock, flags);
                list_del_init(&wait->task_list);
                spin_unlock_irqrestore(&q->lock, flags);
        }
}
EXPORT_SYMBOL(finish_wait);
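
/*
 * Example (illustrative, not part of this file): the canonical
 * open-coded wait loop built from these primitives.  "wq" and
 * "my_cond" are made-up names for the sketch:
 *
 *      DEFINE_WAIT(wait);
 *
 *      for (;;) {
 *              prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *              if (my_cond)
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&wq, &wait);
 *
 * The waker side sets my_cond and calls wake_up(&wq); the barrier in
 * set_current_state() (see the comment above prepare_to_wait()) is
 * what makes the condition test and the wakeup race-free.
 */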

/**
 * abort_exclusive_wait - abort exclusive waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 * @mode: runstate of the waiter to be woken
 * @key: key to identify a wait bit queue or %NULL
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 *
 * Wakes up the next waiter if the caller is concurrently
 * woken up through the queue.
 *
 * This prevents waiter starvation where an exclusive waiter
 * aborts and is woken up concurrently and no one wakes up
 * the next waiter.
 */
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
                        unsigned int mode, void *key)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        spin_lock_irqsave(&q->lock, flags);
        if (!list_empty(&wait->task_list))
                list_del_init(&wait->task_list);
        else if (waitqueue_active(q))
                __wake_up_common(q, mode, 1, 0, key);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(abort_exclusive_wait);
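
/*
 * Illustrative race handled above: an exclusive waiter gives up (e.g.
 * on a signal or timeout) just as a waker dequeues it via
 * autoremove_wake_function().  The waker has then spent its single
 * wake-one wakeup on a waiter that is aborting, so
 * abort_exclusive_wait() finds its own entry already removed and
 * passes the wakeup on to the next waiter.
 */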

int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        int ret = default_wake_function(wait, mode, sync, key);

        /* only dequeue the waiter if it was actually woken */
        if (ret)
                list_del_init(&wait->task_list);
        return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
        struct wait_bit_key *key = arg;
        struct wait_bit_queue *wait_bit
                = container_of(wait, struct wait_bit_queue, wait);

        /*
         * Ignore wakeups for other words/bits hashed onto the same
         * waitqueue, and wakeups while the bit is still set.
         */
        if (wait_bit->key.flags != key->flags ||
                        wait_bit->key.bit_nr != key->bit_nr ||
                        test_bit(key->bit_nr, key->flags))
                return 0;
        else
                return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);

/*
 * To allow interruptible and asynchronous (i.e. nonblocking) waiting,
 * the action functions passed to __wait_on_bit() and __wait_on_bit_lock()
 * may return error codes.  A nonzero return code halts the wait and is
 * propagated back to the caller.
 */
int __sched
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
                        int (*action)(void *), unsigned mode)
{
        int ret = 0;

        do {
                prepare_to_wait(wq, &q->wait, mode);
                if (test_bit(q->key.bit_nr, q->key.flags))
                        ret = (*action)(q->key.flags);
        } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
        finish_wait(wq, &q->wait);
        return ret;
}
EXPORT_SYMBOL(__wait_on_bit);

int __sched out_of_line_wait_on_bit(void *word, int bit,
                                        int (*action)(void *), unsigned mode)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);
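
/*
 * Example (illustrative): callers normally use wait_on_bit() from
 * <linux/wait.h>, which wraps out_of_line_wait_on_bit().  A typical
 * action just schedules; "my_flags" and MY_BUSY_BIT are made up:
 *
 *      static int my_bit_wait(void *word)
 *      {
 *              schedule();
 *              return 0;
 *      }
 *
 *      wait_on_bit(&my_flags, MY_BUSY_BIT, my_bit_wait,
 *                  TASK_UNINTERRUPTIBLE);
 *
 * An interruptible action would instead return -ERESTARTSYS when
 * signal_pending(current) is true, which halts the wait.
 */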

int __sched
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
                        int (*action)(void *), unsigned mode)
{
        do {
                int ret;

                prepare_to_wait_exclusive(wq, &q->wait, mode);
                if (!test_bit(q->key.bit_nr, q->key.flags))
                        continue;
                ret = action(q->key.flags);
                if (!ret)
                        continue;
                /*
                 * The action failed (e.g. a signal): hand any
                 * concurrent wakeup on to the next exclusive waiter
                 * instead of dropping it.
                 */
                abort_exclusive_wait(wq, &q->wait, mode, &q->key);
                return ret;
        } while (test_and_set_bit(q->key.bit_nr, q->key.flags));
        finish_wait(wq, &q->wait);
        return 0;
}
EXPORT_SYMBOL(__wait_on_bit_lock);

int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
                                        int (*action)(void *), unsigned mode)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
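
/*
 * Example (illustrative): wait_on_bit_lock() from <linux/wait.h> wraps
 * this to implement a "bit lock" - it returns 0 with the bit set, i.e.
 * held.  Reusing the made-up names from the sketch above:
 *
 *      if (!wait_on_bit_lock(&my_flags, MY_LOCK_BIT, my_bit_wait,
 *                            TASK_UNINTERRUPTIBLE)) {
 *              ... critical section, MY_LOCK_BIT is ours ...
 *              clear_bit(MY_LOCK_BIT, &my_flags);
 *              smp_mb__after_clear_bit();
 *              wake_up_bit(&my_flags, MY_LOCK_BIT);
 *      }
 */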

void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
        struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
        if (waitqueue_active(wq))
                __wake_up(wq, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be done prior to calling
 * this. Typically, this will be smp_mb__after_clear_bit(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void wake_up_bit(void *word, int bit)
{
        __wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);
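
/*
 * Example (illustrative): when the flag word is modified non-atomically
 * under a lock (the fs/inode.c case mentioned above), a full smp_mb()
 * is needed before the wakeup, since spin_unlock() is not a full
 * barrier.  "my_lock", "my_object" and MY_BUSY_BIT are made-up names:
 *
 *      spin_lock(&my_lock);
 *      my_object->flags &= ~(1UL << MY_BUSY_BIT);
 *      spin_unlock(&my_lock);
 *      smp_mb();
 *      wake_up_bit(&my_object->flags, MY_BUSY_BIT);
 */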

wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
        /*
         * shift is log2(BITS_PER_LONG); since bit < BITS_PER_LONG,
         * shifting the word address left and OR-ing in the bit number
         * yields a distinct hash input for every (word, bit) pair.
         * The waitqueue itself comes from the per-zone hashed table
         * of the zone that contains the word.
         */
        const int shift = BITS_PER_LONG == 32 ? 5 : 6;
        const struct zone *zone = page_zone(virt_to_page(word));
        unsigned long val = (unsigned long)word << shift | bit;

        return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);