drivers/staging/iio/ring_sw.c
/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include "ring_sw.h"
#include "trigger.h"

/**
 * struct iio_sw_ring_buffer - software ring buffer
 * @buf:                generic ring buffer elements
 * @data:               the ring buffer memory
 * @read_p:             read pointer (oldest available)
 * @write_p:            write pointer
 * @last_written_p:     read pointer (newest available)
 * @half_p:             half buffer length behind write_p (event generation)
 * @use_count:          reference count to prevent resizing when in use
 * @update_needed:      flag to indicate a requested change in size
 * @use_lock:           lock to prevent change in size when in use
 *
 * Note that the first element of all ring buffers must be a
 * struct iio_buffer.
 **/
struct iio_sw_ring_buffer {
        struct iio_buffer  buf;
        unsigned char           *data;
        unsigned char           *read_p;
        unsigned char           *write_p;
        unsigned char           *last_written_p;
        /* used to act as a point at which to signal an event */
        unsigned char           *half_p;
        int                     use_count;
        int                     update_needed;
        spinlock_t              use_lock;
};

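/* Map the generic struct iio_buffer embedded at the start of the software
 * ring back to its containing struct iio_sw_ring_buffer. */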
#define iio_to_sw_ring(r) container_of(r, struct iio_sw_ring_buffer, buf)

static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
                                                int bytes_per_datum, int length)
{
        if ((length == 0) || (bytes_per_datum == 0))
                return -EINVAL;
        __iio_update_buffer(&ring->buf, bytes_per_datum, length);
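        /* GFP_ATOMIC because this can be called with ring->use_lock held
         * (see iio_request_update_sw_rb). */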
        ring->data = kmalloc(length*ring->buf.bytes_per_datum, GFP_ATOMIC);
        ring->read_p = NULL;
        ring->write_p = NULL;
        ring->last_written_p = NULL;
        ring->half_p = NULL;
        return ring->data ? 0 : -ENOMEM;
}

static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
        spin_lock_init(&ring->use_lock);
}

static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
        kfree(ring->data);
}

static void iio_mark_sw_rb_in_use(struct iio_buffer *r)
{
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
        spin_lock(&ring->use_lock);
        ring->use_count++;
        spin_unlock(&ring->use_lock);
}

static void iio_unmark_sw_rb_in_use(struct iio_buffer *r)
{
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
        spin_lock(&ring->use_lock);
        ring->use_count--;
        spin_unlock(&ring->use_lock);
}

/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt handler
 * in the device driver */
/* A lock is always held if there is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
                                unsigned char *data, s64 timestamp)
{
        int ret = 0;
        unsigned char *temp_ptr, *change_test_ptr;

        /* initial store */
        if (unlikely(ring->write_p == NULL)) {
                ring->write_p = ring->data;
                /* Doesn't actually matter if this initially points outside
                 * the buffer as long as the read pointer is valid before
                 * half_p passes it - guaranteed as it is set later in this
                 * function.
                 */
                ring->half_p = ring->data - ring->buf.length*ring->buf.bytes_per_datum/2;
        }
        /* Copy data to wherever the current write pointer says */
        memcpy(ring->write_p, data, ring->buf.bytes_per_datum);
        barrier();
        /* Update the pointer used to get the most recent value.
         * Always valid as it either points to the latest or the second
         * latest value.  Before this runs it is null and read attempts
         * fail with -EAGAIN.
         */
        ring->last_written_p = ring->write_p;
        barrier();
        /* temp_ptr is used to ensure we never expose an invalid pointer;
         * it may lag slightly, but is never invalid.
         */
        temp_ptr = ring->write_p + ring->buf.bytes_per_datum;
        /* End of ring, back to the beginning */
        if (temp_ptr == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
                temp_ptr = ring->data;
        /* Update the write pointer.
         * Always valid as long as this is the only function able to write.
         * Care needed with smp systems to ensure more than one ring fill
         * is never scheduled.
         */
        ring->write_p = temp_ptr;

        if (ring->read_p == NULL)
                ring->read_p = ring->data;
        /* Buffer full - move the read pointer and create / escalate
         * ring event */
        /* Tricky case - if the read pointer moves before we adjust it.
         * Handle by not pushing if it has moved - may result in occasional
         * unnecessary buffer full events when it wasn't quite true.
         */
        else if (ring->write_p == ring->read_p) {
                change_test_ptr = ring->read_p;
                temp_ptr = change_test_ptr + ring->buf.bytes_per_datum;
                if (temp_ptr
                    == ring->data + ring->buf.length*ring->buf.bytes_per_datum) {
                        temp_ptr = ring->data;
                }
                /* We are moving the read pointer on by one because the ring
                 * is full.  Any change to the read pointer will be this or
                 * greater.
                 */
                if (change_test_ptr == ring->read_p)
                        ring->read_p = temp_ptr;
        }
        /* Investigate whether our event barrier has been passed */
        /* There are definite 'issues' with this and the chance of a
         * simultaneous read */
        /* Also need to use a loop count to ensure this only happens once */
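        /* half_p trails write_p by half the buffer length; once it catches
         * up with read_p at least half of the ring is unread, so flag that
         * data is available and wake any pollers. */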
        ring->half_p += ring->buf.bytes_per_datum;
        if (ring->half_p == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
                ring->half_p = ring->data;
        if (ring->half_p == ring->read_p) {
                ring->buf.stufftoread = true;
                wake_up_interruptible(&ring->buf.pollq);
        }
        return ret;
}

static int iio_read_first_n_sw_rb(struct iio_buffer *r,
                                  size_t n, char __user *buf)
{
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

        u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
        u8 *data;
        int ret, max_copied, bytes_to_rip, dead_offset;

        /* A userspace program has probably made an error if it tries to
         * read something that is not a whole number of bpds.
         * Return an error.
         */
        if (n % ring->buf.bytes_per_datum) {
                ret = -EINVAL;
                printk(KERN_INFO "Ring buffer read request not whole number of"
                       " samples: Request bytes %zd, Current bytes per datum %d\n",
                       n, ring->buf.bytes_per_datum);
                goto error_ret;
        }
        /* Limit size to whole of ring buffer */
        bytes_to_rip = min((size_t)(ring->buf.bytes_per_datum*ring->buf.length),
                           n);

        data = kmalloc(bytes_to_rip, GFP_KERNEL);
        if (data == NULL) {
                ret = -ENOMEM;
                goto error_ret;
        }

        /* build local copy */
        initial_read_p = ring->read_p;
        if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
                ret = 0;
                goto error_free_data_cpy;
        }

        initial_write_p = ring->write_p;

        /* Need a consistent pair - re-read until neither pointer has moved
         * under us (a concurrent store may be racing with this read). */
        while ((initial_read_p != ring->read_p)
               || (initial_write_p != ring->write_p)) {
                initial_read_p = ring->read_p;
                initial_write_p = ring->write_p;
        }
        if (initial_write_p == initial_read_p) {
                /* No new data available. */
                ret = 0;
                goto error_free_data_cpy;
        }

        if (initial_write_p >= initial_read_p + bytes_to_rip) {
                /* write_p is greater than necessary, all is easy */
                max_copied = bytes_to_rip;
                memcpy(data, initial_read_p, max_copied);
                end_read_p = initial_read_p + max_copied;
        } else if (initial_write_p > initial_read_p) {
                /* not enough data to copy */
                max_copied = initial_write_p - initial_read_p;
                memcpy(data, initial_read_p, max_copied);
                end_read_p = initial_write_p;
        } else {
                /* going through 'end' of ring buffer */
                max_copied = ring->data
                        + ring->buf.length*ring->buf.bytes_per_datum - initial_read_p;
                memcpy(data, initial_read_p, max_copied);
                /* possible we are done if we align precisely with end */
                if (max_copied == bytes_to_rip)
                        end_read_p = ring->data;
                else if (initial_write_p
                         > ring->data + bytes_to_rip - max_copied) {
                        /* enough data to finish */
                        memcpy(data + max_copied, ring->data,
                               bytes_to_rip - max_copied);
                        end_read_p = ring->data + (bytes_to_rip - max_copied);
                        max_copied = bytes_to_rip;
                } else {  /* not enough data */
                        memcpy(data + max_copied, ring->data,
                               initial_write_p - ring->data);
                        max_copied += initial_write_p - ring->data;
                        end_read_p = initial_write_p;
                }
        }
        /* Now to verify which section was cleanly copied - i.e. how far
         * read pointer has been pushed */
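        /* Anything at the start of our local copy that the writer has since
         * overwritten counts as dead_offset and is skipped when copying to
         * userspace. */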
        current_read_p = ring->read_p;

        if (initial_read_p <= current_read_p)
                dead_offset = current_read_p - initial_read_p;
        else
                dead_offset = ring->buf.length*ring->buf.bytes_per_datum
                        - (initial_read_p - current_read_p);

        /* possible issue if the initial write has been lapped or indeed
         * the point we were reading to has been passed */
        /* No valid data read.
         * In this case the read pointer is already correct having been
         * pushed further than we would look. */
        if (max_copied - dead_offset < 0) {
                ret = 0;
                goto error_free_data_cpy;
        }

        /* setup the next read position */
        /* Beware, this may fail due to concurrency fun and games.
         * Possible that sufficient fill commands have run to push the read
         * pointer past where we would be after the rip. If this occurs, leave
         * it be.
         */
        /* Tricky - deal with loops */

        while (ring->read_p != end_read_p)
                ring->read_p = end_read_p;

        ret = max_copied - dead_offset;

        if (copy_to_user(buf, data + dead_offset, ret)) {
                ret = -EFAULT;
                goto error_free_data_cpy;
        }

        if (bytes_to_rip >= ring->buf.length*ring->buf.bytes_per_datum/2)
                ring->buf.stufftoread = false;

error_free_data_cpy:
        kfree(data);
error_ret:

        return ret;
}

static int iio_store_to_sw_rb(struct iio_buffer *r,
                              u8 *data,
                              s64 timestamp)
{
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
        return iio_store_to_sw_ring(ring, data, timestamp);
}

static int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
                                      unsigned char *data)
{
        unsigned char *last_written_p_copy;

        iio_mark_sw_rb_in_use(&ring->buf);
again:
        barrier();
        last_written_p_copy = ring->last_written_p;
        barrier(); /* unnecessary? */
        /* Check there is anything here */
        if (last_written_p_copy == NULL) {
                /* Balance the mark above before bailing out */
                iio_unmark_sw_rb_in_use(&ring->buf);
                return -EAGAIN;
        }
        memcpy(data, last_written_p_copy, ring->buf.bytes_per_datum);

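        /* If a concurrent store moved last_written_p while we copied, the
         * data above may be torn - retry until a consistent sample is read. */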
        if (unlikely(ring->last_written_p != last_written_p_copy))
                goto again;

        iio_unmark_sw_rb_in_use(&ring->buf);
        return 0;
}

static int iio_read_last_from_sw_rb(struct iio_buffer *r,
                             unsigned char *data)
{
        return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}

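/* Reallocate the ring to pick up any requested change to length or
 * bytes_per_datum.  Only done when no one has the ring marked in use. */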
static int iio_request_update_sw_rb(struct iio_buffer *r)
{
        int ret = 0;
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

        r->stufftoread = false;
        spin_lock(&ring->use_lock);
        if (!ring->update_needed)
                goto error_ret;
        if (ring->use_count) {
                ret = -EAGAIN;
                goto error_ret;
        }
        __iio_free_sw_ring_buffer(ring);
        ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bytes_per_datum,
                                            ring->buf.length);
error_ret:
        spin_unlock(&ring->use_lock);
        return ret;
}

static int iio_get_bytes_per_datum_sw_rb(struct iio_buffer *r)
{
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
        return ring->buf.bytes_per_datum;
}

static int iio_set_bytes_per_datum_sw_rb(struct iio_buffer *r, size_t bpd)
{
        if (r->bytes_per_datum != bpd) {
                r->bytes_per_datum = bpd;
                if (r->access->mark_param_change)
                        r->access->mark_param_change(r);
        }
        return 0;
}

static int iio_get_length_sw_rb(struct iio_buffer *r)
{
        return r->length;
}

static int iio_set_length_sw_rb(struct iio_buffer *r, int length)
{
        if (r->length != length) {
                r->length = length;
                if (r->access->mark_param_change)
                        r->access->mark_param_change(r);
        }
        return 0;
}

static int iio_mark_update_needed_sw_rb(struct iio_buffer *r)
{
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
        ring->update_needed = true;
        return 0;
}

static IIO_BUFFER_ENABLE_ATTR;
static IIO_BUFFER_BYTES_PER_DATUM_ATTR;
static IIO_BUFFER_LENGTH_ATTR;

/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
        &dev_attr_length.attr,
        &dev_attr_bytes_per_datum.attr,
        &dev_attr_enable.attr,
        NULL,
};

static struct attribute_group iio_ring_attribute_group = {
        .attrs = iio_ring_attributes,
        .name = "buffer",
};

struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
        struct iio_buffer *buf;
        struct iio_sw_ring_buffer *ring;

        ring = kzalloc(sizeof *ring, GFP_KERNEL);
        if (!ring)
                return NULL;
        ring->update_needed = true;
        buf = &ring->buf;
        iio_buffer_init(buf, indio_dev);
        __iio_init_sw_ring_buffer(ring);
        buf->attrs = &iio_ring_attribute_group;

        return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);

void iio_sw_rb_free(struct iio_buffer *r)
{
        kfree(iio_to_sw_ring(r));
}
EXPORT_SYMBOL(iio_sw_rb_free);

const struct iio_buffer_access_funcs ring_sw_access_funcs = {
        .mark_in_use = &iio_mark_sw_rb_in_use,
        .unmark_in_use = &iio_unmark_sw_rb_in_use,
        .store_to = &iio_store_to_sw_rb,
        .read_last = &iio_read_last_from_sw_rb,
        .read_first_n = &iio_read_first_n_sw_rb,
        .mark_param_change = &iio_mark_update_needed_sw_rb,
        .request_update = &iio_request_update_sw_rb,
        .get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb,
        .set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb,
        .get_length = &iio_get_length_sw_rb,
        .set_length = &iio_set_length_sw_rb,
};
EXPORT_SYMBOL(ring_sw_access_funcs);
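
/* A minimal sketch of how a driver is typically expected to wire this ring
 * in (illustrative only; the surrounding indio_dev setup and teardown, and
 * the exact buffer field name, belong to the driver and IIO core of this
 * era, not to this file):
 *
 *        indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
 *        if (!indio_dev->buffer)
 *                return -ENOMEM;
 *        indio_dev->buffer->access = &ring_sw_access_funcs;
 *        ...
 *        iio_sw_rb_free(indio_dev->buffer);        (on teardown)
 */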

MODULE_DESCRIPTION("Industrial I/O software ring buffer");
MODULE_LICENSE("GPL");