drivers/staging/iio/industrialio-ring.c
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of ring allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/slab.h>

#include "iio.h"
#include "ring_generic.h"

/**
 * iio_push_ring_event() - push an event on a ring buffer's event interface
 * @ring_buf:   ring buffer on which the event occurred
 * @event_code: event identifier
 * @timestamp:  time of the event
 **/
int iio_push_ring_event(struct iio_ring_buffer *ring_buf,
                        int event_code,
                        s64 timestamp)
{
        return __iio_push_event(&ring_buf->ev_int,
                                event_code,
                                timestamp,
                                &ring_buf->shared_ev_pointer);
}
EXPORT_SYMBOL(iio_push_ring_event);

/**
 * iio_push_or_escalate_ring_event() - escalate or push as appropriate
 * @ring_buf:   ring buffer on which the event occurred
 * @event_code: event identifier
 * @timestamp:  time of the event
 *
 * If an event is already pending, change it in place to the new event
 * code rather than pushing a second one.
 **/
int iio_push_or_escalate_ring_event(struct iio_ring_buffer *ring_buf,
                                    int event_code,
                                    s64 timestamp)
{
        if (ring_buf->shared_ev_pointer.ev_p)
                __iio_change_event(ring_buf->shared_ev_pointer.ev_p,
                                   event_code,
                                   timestamp);
        else
                return iio_push_ring_event(ring_buf,
                                           event_code,
                                           timestamp);
        return 0;
}
EXPORT_SYMBOL(iio_push_or_escalate_ring_event);
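
/*
 * Example (illustrative sketch only, not part of this file): an interrupt
 * or trigger handler in a driver might report ring fullness like this.
 * The driver state structure, event code and field names below are
 * hypothetical; only the exported function comes from this file.
 *
 *      static void foo_ring_thresh_handler(struct foo_state *st, s64 ts)
 *      {
 *              iio_push_or_escalate_ring_event(st->indio_dev->ring,
 *                                              FOO_RING_THRESH_EVENT,
 *                                              ts);
 *      }
 */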

/**
 * iio_ring_open() - chrdev file open for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_open(struct inode *inode, struct file *filp)
{
        struct iio_handler *hand
                = container_of(inode->i_cdev, struct iio_handler, chrdev);
        struct iio_ring_buffer *rb = hand->private;

        filp->private_data = hand->private;
        if (rb->access.mark_in_use)
                rb->access.mark_in_use(rb);

        return 0;
}

/**
 * iio_ring_release() - chrdev file close for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_release(struct inode *inode, struct file *filp)
{
        struct cdev *cd = inode->i_cdev;
        struct iio_handler *hand = iio_cdev_to_handler(cd);
        struct iio_ring_buffer *rb = hand->private;

        clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags);
        if (rb->access.unmark_in_use)
                rb->access.unmark_in_use(rb);

        return 0;
}

/**
 * iio_ring_rip_outer() - chrdev read for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static ssize_t iio_ring_rip_outer(struct file *filp, char __user *buf,
                                  size_t count, loff_t *f_ps)
{
        struct iio_ring_buffer *rb = filp->private_data;
        int ret, dead_offset, copied;
        u8 *data;

        /* The rip_lots callback must exist. */
        if (!rb->access.rip_lots)
                return -EINVAL;
        copied = rb->access.rip_lots(rb, count, &data, &dead_offset);

        if (copied <= 0) {
                ret = copied;
                goto error_ret;
        }
        if (copy_to_user(buf, data + dead_offset, copied)) {
                ret = -EFAULT;
                goto error_free_data_cpy;
        }
        /* In clever ring buffer designs this may not need to be freed.
         * When such a design exists I'll add this to ring access funcs.
         */
        kfree(data);

        return copied;

error_free_data_cpy:
        kfree(data);
error_ret:
        return ret;
}

static const struct file_operations iio_ring_fileops = {
        .read = iio_ring_rip_outer,
        .release = iio_ring_release,
        .open = iio_ring_open,
        .owner = THIS_MODULE,
        .llseek = noop_llseek,
};

/**
 * __iio_request_ring_buffer_event_chrdev() - allocate ring event chrdev
 * @buf:        ring buffer whose event chrdev we are allocating
 * @id:         id of this ring buffer (typically 0)
 * @owner:      the module who owns the ring buffer (for ref counting)
 * @dev:        device with which the chrdev is associated
 **/
static inline int
__iio_request_ring_buffer_event_chrdev(struct iio_ring_buffer *buf,
                                       int id,
                                       struct module *owner,
                                       struct device *dev)
{
        snprintf(buf->ev_int._name, sizeof(buf->ev_int._name),
                 "%s:event%d",
                 dev_name(&buf->dev),
                 id);
        return iio_setup_ev_int(&(buf->ev_int),
                                buf->ev_int._name,
                                owner,
                                dev);
}

static inline void
__iio_free_ring_buffer_event_chrdev(struct iio_ring_buffer *buf)
{
        iio_free_ev_int(&(buf->ev_int));
}

static void iio_ring_access_release(struct device *dev)
{
        struct iio_ring_buffer *buf
                = access_dev_to_iio_ring_buffer(dev);
        cdev_del(&buf->access_handler.chrdev);
        iio_device_free_chrdev_minor(MINOR(dev->devt));
}

static struct device_type iio_ring_access_type = {
        .release = iio_ring_access_release,
};

static inline int
__iio_request_ring_buffer_access_chrdev(struct iio_ring_buffer *buf,
                                        int id,
                                        struct module *owner)
{
        int ret, minor;

        buf->access_handler.flags = 0;

        buf->access_dev.parent = &buf->dev;
        buf->access_dev.bus = &iio_bus_type;
        buf->access_dev.type = &iio_ring_access_type;
        device_initialize(&buf->access_dev);

        minor = iio_device_get_chrdev_minor();
        if (minor < 0) {
                ret = minor;
                goto error_device_put;
        }
        buf->access_dev.devt = MKDEV(MAJOR(iio_devt), minor);

        buf->access_id = id;

        dev_set_name(&buf->access_dev, "%s:access%d",
                     dev_name(&buf->dev),
                     buf->access_id);
        ret = device_add(&buf->access_dev);
        if (ret < 0) {
                printk(KERN_ERR "failed to add the ring access dev\n");
                goto error_device_put;
        }

        cdev_init(&buf->access_handler.chrdev, &iio_ring_fileops);
        buf->access_handler.chrdev.owner = owner;

        ret = cdev_add(&buf->access_handler.chrdev, buf->access_dev.devt, 1);
        if (ret) {
                printk(KERN_ERR "failed to allocate ring access chrdev\n");
                goto error_device_unregister;
        }
        return 0;

error_device_unregister:
        /* device_unregister() already drops the reference taken at
         * initialization, so do not fall through to put_device() below.
         */
        device_unregister(&buf->access_dev);
        return ret;
error_device_put:
        put_device(&buf->access_dev);
        return ret;
}

static void __iio_free_ring_buffer_access_chrdev(struct iio_ring_buffer *buf)
{
        device_unregister(&buf->access_dev);
}

void iio_ring_buffer_init(struct iio_ring_buffer *ring,
                          struct iio_dev *dev_info)
{
        if (ring->access.mark_param_change)
                ring->access.mark_param_change(ring);
        ring->indio_dev = dev_info;
        ring->ev_int.private = ring;
        ring->access_handler.private = ring;
        ring->shared_ev_pointer.ev_p = NULL;
        spin_lock_init(&ring->shared_ev_pointer.lock);
}
EXPORT_SYMBOL(iio_ring_buffer_init);

int iio_ring_buffer_register(struct iio_ring_buffer *ring, int id)
{
        int ret;

        ring->id = id;

        dev_set_name(&ring->dev, "%s:buffer%d",
                     dev_name(ring->dev.parent),
                     ring->id);
        ret = device_add(&ring->dev);
        if (ret)
                goto error_ret;

        ret = __iio_request_ring_buffer_event_chrdev(ring,
                                                     0,
                                                     ring->owner,
                                                     &ring->dev);
        if (ret)
                goto error_remove_device;

        ret = __iio_request_ring_buffer_access_chrdev(ring,
                                                      0,
                                                      ring->owner);
        if (ret)
                goto error_free_ring_buffer_event_chrdev;

        if (ring->scan_el_attrs) {
                ret = sysfs_create_group(&ring->dev.kobj,
                                         ring->scan_el_attrs);
                if (ret) {
                        dev_err(&ring->dev,
                                "Failed to add sysfs scan elements\n");
                        goto error_free_ring_buffer_access_chrdev;
                }
        }

        return 0;
error_free_ring_buffer_access_chrdev:
        __iio_free_ring_buffer_access_chrdev(ring);
error_free_ring_buffer_event_chrdev:
        __iio_free_ring_buffer_event_chrdev(ring);
error_remove_device:
        device_del(&ring->dev);
error_ret:
        return ret;
}
EXPORT_SYMBOL(iio_ring_buffer_register);
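
/*
 * Example (illustrative sketch only, not part of this file): a typical
 * driver probe sequence for a ring-buffered device.  The allocation
 * helper and the error labels are hypothetical; only
 * iio_ring_buffer_init() and iio_ring_buffer_register() come from this
 * file.
 *
 *      ret = foo_ring_allocate(st->indio_dev);
 *      if (ret)
 *              goto error_free_dev;
 *      iio_ring_buffer_init(st->indio_dev->ring, st->indio_dev);
 *      ret = iio_ring_buffer_register(st->indio_dev->ring, 0);
 *      if (ret)
 *              goto error_free_ring;
 *
 * with iio_ring_buffer_unregister() called in the reverse order on
 * remove.
 */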

void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
{
        if (ring->scan_el_attrs)
                sysfs_remove_group(&ring->dev.kobj,
                                   ring->scan_el_attrs);

        __iio_free_ring_buffer_access_chrdev(ring);
        __iio_free_ring_buffer_event_chrdev(ring);
        device_del(&ring->dev);
}
EXPORT_SYMBOL(iio_ring_buffer_unregister);

ssize_t iio_read_ring_length(struct device *dev,
                             struct device_attribute *attr,
                             char *buf)
{
        int len = 0;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        if (ring->access.get_length)
                len = sprintf(buf, "%d\n",
                              ring->access.get_length(ring));

        return len;
}
EXPORT_SYMBOL(iio_read_ring_length);

ssize_t iio_write_ring_length(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf,
                              size_t len)
{
        int ret;
        ulong val;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        ret = strict_strtoul(buf, 10, &val);
        if (ret)
                return ret;

        if (ring->access.get_length)
                if (val == ring->access.get_length(ring))
                        return len;

        if (ring->access.set_length) {
                ring->access.set_length(ring, val);
                if (ring->access.mark_param_change)
                        ring->access.mark_param_change(ring);
        }

        return len;
}
EXPORT_SYMBOL(iio_write_ring_length);

ssize_t iio_read_ring_bytes_per_datum(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        int len = 0;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        if (ring->access.get_bytes_per_datum)
                len = sprintf(buf, "%d\n",
                              ring->access.get_bytes_per_datum(ring));

        return len;
}
EXPORT_SYMBOL(iio_read_ring_bytes_per_datum);
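
/*
 * Illustrative sketch (not part of this file): a ring buffer
 * implementation makes itself usable by filling in the access callbacks
 * this file dispatches through.  The foo_* implementation functions are
 * hypothetical; the callback names are the ones used above.
 *
 *      static void foo_ring_setup(struct iio_ring_buffer *ring)
 *      {
 *              ring->access.get_length = &foo_ring_get_length;
 *              ring->access.set_length = &foo_ring_set_length;
 *              ring->access.get_bytes_per_datum = &foo_ring_get_bpd;
 *              ring->access.mark_param_change = &foo_ring_mark_param_change;
 *              ring->access.mark_in_use = &foo_ring_mark_in_use;
 *              ring->access.unmark_in_use = &foo_ring_unmark_in_use;
 *              ring->access.rip_lots = &foo_ring_rip_lots;
 *      }
 */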

ssize_t iio_store_ring_enable(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf,
                              size_t len)
{
        int ret;
        bool requested_state, current_state;
        int previous_mode;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        struct iio_dev *dev_info = ring->indio_dev;

        mutex_lock(&dev_info->mlock);
        previous_mode = dev_info->currentmode;
        requested_state = !(buf[0] == '0');
        current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
        if (current_state == requested_state) {
                printk(KERN_INFO "iio-ring, current state requested again\n");
                goto done;
        }
        if (requested_state) {
                if (ring->preenable) {
                        ret = ring->preenable(dev_info);
                        if (ret) {
                                printk(KERN_ERR
                                       "Buffer not started: "
                                       "ring preenable failed\n");
                                goto error_ret;
                        }
                }
                if (ring->access.request_update) {
                        ret = ring->access.request_update(ring);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: "
                                       "ring parameter update failed\n");
                                goto error_ret;
                        }
                }
                if (ring->access.mark_in_use)
                        ring->access.mark_in_use(ring);
                /* Definitely possible for devices to support both of these. */
                if (dev_info->modes & INDIO_RING_TRIGGERED) {
                        if (!dev_info->trig) {
                                printk(KERN_INFO
                                       "Buffer not started: no trigger\n");
                                ret = -EINVAL;
                                if (ring->access.unmark_in_use)
                                        ring->access.unmark_in_use(ring);
                                goto error_ret;
                        }
                        dev_info->currentmode = INDIO_RING_TRIGGERED;
                } else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER) {
                        dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
                } else { /* Should never be reached. */
                        ret = -EINVAL;
                        goto error_ret;
                }

                if (ring->postenable) {
                        ret = ring->postenable(dev_info);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: "
                                       "postenable failed\n");
                                if (ring->access.unmark_in_use)
                                        ring->access.unmark_in_use(ring);
                                dev_info->currentmode = previous_mode;
                                if (ring->postdisable)
                                        ring->postdisable(dev_info);
                                goto error_ret;
                        }
                }
        } else {
                if (ring->predisable) {
                        ret = ring->predisable(dev_info);
                        if (ret)
                                goto error_ret;
                }
                if (ring->access.unmark_in_use)
                        ring->access.unmark_in_use(ring);
                dev_info->currentmode = INDIO_DIRECT_MODE;
                if (ring->postdisable) {
                        ret = ring->postdisable(dev_info);
                        if (ret)
                                goto error_ret;
                }
        }
done:
        mutex_unlock(&dev_info->mlock);
        return len;

error_ret:
        mutex_unlock(&dev_info->mlock);
        return ret;
}
EXPORT_SYMBOL(iio_store_ring_enable);
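
/*
 * Illustrative usage (sketch, not part of this file): the two functions
 * above are normally bound to a sysfs 'enable' attribute on the ring's
 * device, so userspace can start and stop capture with e.g.
 *
 *      echo 1 > <sysfs path of the ring device>/enable
 *      cat <sysfs path of the ring device>/enable
 *
 * The attribute name and exact path depend on how the driver wired the
 * attribute up.
 */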

ssize_t iio_show_ring_enable(struct device *dev,
                             struct device_attribute *attr,
                             char *buf)
{
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", !!(ring->indio_dev->currentmode
                                       & INDIO_ALL_RING_MODES));
}
EXPORT_SYMBOL(iio_show_ring_enable);

ssize_t iio_scan_el_show(struct device *dev,
                         struct device_attribute *attr,
                         char *buf)
{
        int ret;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        struct iio_scan_el *this_el = to_iio_scan_el(attr);

        ret = iio_scan_mask_query(ring, this_el->number);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", ret);
}
EXPORT_SYMBOL(iio_scan_el_show);

ssize_t iio_scan_el_store(struct device *dev,
                          struct device_attribute *attr,
                          const char *buf,
                          size_t len)
{
        int ret = 0;
        bool state;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        struct iio_dev *indio_dev = ring->indio_dev;
        struct iio_scan_el *this_el = to_iio_scan_el(attr);

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        ret = iio_scan_mask_query(ring, this_el->number);
        if (ret < 0)
                goto error_ret;
        if (!state && ret) {
                ret = iio_scan_mask_clear(ring, this_el->number);
                if (ret)
                        goto error_ret;
        } else if (state && !ret) {
                ret = iio_scan_mask_set(ring, this_el->number);
                if (ret)
                        goto error_ret;
        }
        if (this_el->set_state)
                ret = this_el->set_state(this_el, indio_dev, state);
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_store);
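
/*
 * Illustrative sketch (not part of this file): a driver can supply a
 * per-element set_state callback, which iio_scan_el_store() above calls
 * when an element is switched on or off.  The foo_* names are
 * hypothetical; the callback signature is the one used above.
 *
 *      static int foo_scan_el_set_state(struct iio_scan_el *scan_el,
 *                                       struct iio_dev *indio_dev,
 *                                       bool state)
 *      {
 *              return foo_write_chan_en(indio_dev, scan_el->number, state);
 *      }
 */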

ssize_t iio_scan_el_ts_show(struct device *dev,
                            struct device_attribute *attr,
                            char *buf)
{
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", ring->scan_timestamp);
}
EXPORT_SYMBOL(iio_scan_el_ts_show);

ssize_t iio_scan_el_ts_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf,
                             size_t len)
{
        int ret = 0;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        struct iio_dev *indio_dev = ring->indio_dev;
        bool state;

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        ring->scan_timestamp = state;
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_ts_store);