Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/linux-arm-soc
[pandora-kernel.git] / fs / btrfs / locking.c
1 /*
2  * Copyright (C) 2008 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/spinlock.h>
21 #include <linux/page-flags.h>
22 #include <asm/bug.h>
23 #include "ctree.h"
24 #include "extent_io.h"
25 #include "locking.h"
26
27 void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
28
/*
 * Convert a held spinning lock into its blocking counterpart.
 *
 * @eb: extent buffer whose lock the caller currently holds
 * @rw: kind of spinning lock held: BTRFS_WRITE_LOCK or BTRFS_READ_LOCK
 *
 * Bumps the count of blocking holders and drops the underlying rwlock,
 * so the holder may sleep; concurrent lockers see the buffer as locked
 * via the blocking_writers/blocking_readers counters instead.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	if (rw == BTRFS_WRITE_LOCK) {
		/*
		 * Only the first transition moves the hold from spinning
		 * to blocking; if blocking_writers is already set there is
		 * nothing left to do.
		 */
		if (atomic_read(&eb->blocking_writers) == 0) {
			/* write locks are exclusive: exactly one spinner */
			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
			atomic_dec(&eb->spinning_writers);
			btrfs_assert_tree_locked(eb);
			/*
			 * Raise blocking_writers before write_unlock() so
			 * lockers that grab the rwlock right after us still
			 * see the blocking hold when they re-check.
			 */
			atomic_inc(&eb->blocking_writers);
			write_unlock(&eb->lock);
		}
	} else if (rw == BTRFS_READ_LOCK) {
		btrfs_assert_tree_read_locked(eb);
		/* publish the blocking hold before dropping the rwlock */
		atomic_inc(&eb->blocking_readers);
		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
		atomic_dec(&eb->spinning_readers);
		read_unlock(&eb->lock);
	}
	return;
}
53
/*
 * Convert a held blocking lock back into its spinning counterpart.
 *
 * @eb: extent buffer whose lock the caller currently holds
 * @rw: kind of blocking lock held: BTRFS_WRITE_LOCK_BLOCKING or
 *      BTRFS_READ_LOCK_BLOCKING
 *
 * Re-acquires the underlying rwlock (which may spin) and drops the
 * blocking count, waking anybody sleeping on the corresponding wait
 * queue for the blocking hold to go away.
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		/* we must be the one and only blocking writer */
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		/* drop the blocking count only after the rwlock is ours */
		if (atomic_dec_and_test(&eb->blocking_writers))
			wake_up(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		if (atomic_dec_and_test(&eb->blocking_readers))
			wake_up(&eb->read_lock_wq);
	}
	return;
}
76
/*
 * Take a spinning read lock on @eb.  This will wait (sleeping on
 * write_lock_wq) for any blocking writers to finish.
 *
 * The wait_event() before read_lock() is only an optimization: a
 * writer can go blocking between the wait and the lock acquisition,
 * so blocking_writers is re-checked under the rwlock and we drop it
 * and retry from scratch if a blocking writer sneaked in.
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		/* lost the race: a writer went blocking; back off and retry */
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	/* account ourselves as one spinning reader */
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}
95
/*
 * Try to take a spinning read lock without waiting.
 *
 * Returns 1 if we get the read lock and 0 if we don't; this won't
 * wait for blocking writers.  blocking_writers is checked both before
 * and after taking the rwlock, since a writer may go blocking (and
 * release the rwlock) in between.
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		/* a writer went blocking while we acquired the rwlock */
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}
114
/*
 * Try to take a spinning write lock without waiting.
 *
 * Returns 1 if we get the write lock and 0 if we don't; this won't
 * wait for blocking writers or readers.  Both counters are checked
 * again after taking the rwlock, because a holder may go blocking
 * (and release the rwlock) while we spin on it.
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		/* somebody went blocking while we acquired the rwlock */
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	return 1;
}
134
/*
 * Drop a spinning read lock.
 *
 * The caller must hold a spinning (not blocking) read lock; both the
 * reader accounting and the underlying rwlock are released.
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}
146
/*
 * Drop a blocking read lock.
 *
 * A blocking reader does not hold the rwlock (it was released in
 * btrfs_set_lock_blocking_rw()), so there is nothing to unlock: just
 * drop the blocking count and wake anyone waiting on read_lock_wq for
 * the blocking readers to drain.
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	if (atomic_dec_and_test(&eb->blocking_readers))
		wake_up(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}
158
/*
 * Take a spinning write lock on @eb.  This will wait for both
 * blocking readers and blocking writers to go away.
 *
 * The two wait_event() calls before write_lock() are optimizations: a
 * holder can go blocking between the wait and the lock acquisition,
 * so both blocking counters are re-checked under the rwlock and we
 * drop it and retry from the top if either re-appeared.
 *
 * Always returns 0.
 */
int btrfs_tree_lock(struct extent_buffer *eb)
{
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers)) {
		/* a reader went blocking while we acquired the rwlock */
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		/* likewise for a writer */
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	/* the rwlock is ours exclusively: account as a spinning writer */
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	return 0;
}
186
/*
 * Drop a spinning or a blocking write lock.
 *
 * Always returns 0.
 */
int btrfs_tree_unlock(struct extent_buffer *eb)
{
	/*
	 * Sample the blocking state once up front; write locks are
	 * exclusive, so no one else can change blocking_writers while
	 * we hold the lock.
	 */
	int blockers = atomic_read(&eb->blocking_writers);

	/* with a single writer there can be at most one blocking writer */
	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	atomic_dec(&eb->write_locks);

	if (blockers) {
		/* blocking hold: the rwlock is not held, just wake waiters */
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		/* make the counter update visible before the wakeup */
		smp_wmb();
		wake_up(&eb->write_lock_wq);
	} else {
		/* spinning hold: drop the accounting and the rwlock */
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
	return 0;
}
211
212 void btrfs_assert_tree_locked(struct extent_buffer *eb)
213 {
214         BUG_ON(!atomic_read(&eb->write_locks));
215 }
216
217 void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
218 {
219         BUG_ON(!atomic_read(&eb->read_locks));
220 }