#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes.  Readers never
 * block but they may have to retry if a writer is in
 * progress. Writers do not wait for readers.
 *
 * This is not as cache friendly as brlock. Also, this will not work
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected reader usage:
 *      do {
 *          seq = read_seqbegin(&foo);
 *          ...
 *      } while (read_seqretry(&foo, seq));
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on the x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli.
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>

typedef struct {
        unsigned sequence;
        spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems.  We think these are
 * OK now.  Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname) \
                 { 0, __SPIN_LOCK_UNLOCKED(lockname) }

#define seqlock_init(x)                                 \
        do {                                            \
                (x)->sequence = 0;                      \
                spin_lock_init(&(x)->lock);             \
        } while (0)

#define DEFINE_SEQLOCK(x) \
                seqlock_t x = __SEQLOCK_UNLOCKED(x)

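/*
 * Example (illustrative: 'foo_lock' is a hypothetical lock name, used
 * throughout the example comments in this file):
 *
 *      static DEFINE_SEQLOCK(foo_lock);
 *
 * or, for a lock embedded in a dynamically allocated object:
 *
 *      seqlock_init(&some_obj->lock);
 */
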
/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock()/spin_unlock().
 * We don't need preempt_disable() because spin_lock() already does that.
 */
static inline void write_seqlock(seqlock_t *sl)
{
        spin_lock(&sl->lock);
        ++sl->sequence;
        smp_wmb();
}

static inline void write_sequnlock(seqlock_t *sl)
{
        smp_wmb();
        sl->sequence++;
        spin_unlock(&sl->lock);
}
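
/*
 * Example writer (illustrative only: 'foo_lock' and the 'foo' fields
 * are hypothetical, not part of this header). Both stores appear to
 * readers as one consistent update:
 *
 *      write_seqlock(&foo_lock);
 *      foo.value = new_value;
 *      foo.stamp = new_stamp;
 *      write_sequnlock(&foo_lock);
 */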

static inline int write_tryseqlock(seqlock_t *sl)
{
        int ret = spin_trylock(&sl->lock);

        if (ret) {
                ++sl->sequence;
                smp_wmb();
        }
        return ret;
}
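
/*
 * Example (illustrative, same hypothetical 'foo' names as above): an
 * opportunistic writer that backs off instead of spinning when another
 * writer already holds the lock:
 *
 *      if (write_tryseqlock(&foo_lock)) {
 *              foo.value = new_value;
 *              write_sequnlock(&foo_lock);
 *      }
 */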

/* Start of read calculation -- fetch last complete writer token */
static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
{
        unsigned ret;

repeat:
        ret = ACCESS_ONCE(sl->sequence);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
        }
        smp_rmb();

        return ret;
}

/*
 * Test if the reader processed invalid data.
 *
 * If the sequence value changed, then a writer modified the data while
 * the reader was inside the critical section.
 */
static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
{
        smp_rmb();

        return unlikely(sl->sequence != start);
}
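
/*
 * Matching reader for the example writer above (same hypothetical
 * 'foo'/'foo_lock' names). The loop re-reads until no writer ran
 * concurrently, so 'v' and 's' form a consistent snapshot:
 *
 *      unsigned seq;
 *
 *      do {
 *              seq = read_seqbegin(&foo_lock);
 *              v = foo.value;
 *              s = foo.stamp;
 *      } while (read_seqretry(&foo_lock, seq));
 */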


/*
 * Version using a sequence counter only.
 * This can be used when the code has its own mutex protecting the
 * update, starting before write_seqcount_begin() and ending
 * after write_seqcount_end().
 */

typedef struct seqcount {
        unsigned sequence;
} seqcount_t;

#define SEQCNT_ZERO { 0 }
#define seqcount_init(x)        do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)

/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret;

repeat:
        ret = s->sequence;
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
        }
        return ret;
}

/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret = __read_seqcount_begin(s);
        smp_rmb();
        return ret;
}

/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        smp_rmb();

        return __read_seqcount_retry(s, start);
}


/*
 * The sequence-counter-only versions assume that callers provide their
 * own mutual exclusion between writers.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
        s->sequence++;
        smp_wmb();
}

static inline void write_seqcount_end(seqcount_t *s)
{
        smp_wmb();
        s->sequence++;
}
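
/*
 * Example pairing a seqcount_t with a caller-owned mutex (illustrative
 * only: 'bar_mutex', 'bar_seq' and the 'bar' fields are hypothetical).
 * The mutex serializes writers; the seqcount lets lockless readers
 * detect a concurrent update and retry:
 *
 *      Writer:
 *              mutex_lock(&bar_mutex);
 *              write_seqcount_begin(&bar_seq);
 *              bar.a = new_a;
 *              bar.b = new_b;
 *              write_seqcount_end(&bar_seq);
 *              mutex_unlock(&bar_mutex);
 *
 *      Reader:
 *              do {
 *                      seq = read_seqcount_begin(&bar_seq);
 *                      a = bar.a;
 *                      b = bar.b;
 *              } while (read_seqcount_retry(&bar_seq, seq));
 */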

/**
 * write_seqcount_barrier - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_barrier, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_barrier(seqcount_t *s)
{
        smp_wmb();
        s->sequence += 2;
}
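
/*
 * Example (illustrative, same hypothetical 'bar' names as above): a
 * writer that has already changed 'bar' under its own lock can bump the
 * count past any in-flight readers, forcing them to retry, without a
 * full begin/end pair:
 *
 *      mutex_lock(&bar_mutex);
 *      bar.a = new_a;
 *      write_seqcount_barrier(&bar_seq);
 *      mutex_unlock(&bar_mutex);
 */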

/*
 * Possible sw/hw IRQ protected versions of the interfaces.
 */
#define write_seqlock_irqsave(lock, flags)                              \
        do { local_irq_save(flags); write_seqlock(lock); } while (0)
#define write_seqlock_irq(lock)                                         \
        do { local_irq_disable();   write_seqlock(lock); } while (0)
#define write_seqlock_bh(lock)                                          \
        do { local_bh_disable();    write_seqlock(lock); } while (0)

#define write_sequnlock_irqrestore(lock, flags)                         \
        do { write_sequnlock(lock); local_irq_restore(flags); } while (0)
#define write_sequnlock_irq(lock)                                       \
        do { write_sequnlock(lock); local_irq_enable(); } while (0)
#define write_sequnlock_bh(lock)                                        \
        do { write_sequnlock(lock); local_bh_enable(); } while (0)

#define read_seqbegin_irqsave(lock, flags)                              \
        ({ local_irq_save(flags);   read_seqbegin(lock); })

#define read_seqretry_irqrestore(lock, iv, flags)                       \
        ({                                                              \
                int ret = read_seqretry(lock, iv);                      \
                local_irq_restore(flags);                               \
                ret;                                                    \
        })
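
/*
 * Example of the IRQ-safe write variants (illustrative: 'foo_lock' is
 * the hypothetical seqlock from the examples above). Use these when
 * the write side can also run from hard-IRQ context:
 *
 *      unsigned long flags;
 *
 *      write_seqlock_irqsave(&foo_lock, flags);
 *      foo.value = new_value;
 *      write_sequnlock_irqrestore(&foo_lock, flags);
 */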

#endif /* __LINUX_SEQLOCK_H */