drivers/staging/iio/industrialio-ring.c
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of ring allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "iio.h"
#include "ring_generic.h"

/* IDR for ring buffer identifier */
static DEFINE_IDR(iio_ring_idr);
/* IDR for ring event identifier */
static DEFINE_IDR(iio_ring_event_idr);
/* IDR for ring access identifier */
static DEFINE_IDR(iio_ring_access_idr);

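/**
 * iio_push_ring_event() - push an event on the ring buffer's event chrdev
 * @ring_buf:		ring buffer the event is associated with
 * @event_code:		code identifying the event
 * @timestamp:		time at which the event occurred
 *
 * Thin wrapper around __iio_push_event() using the event interface and
 * shared event pointer embedded in the ring buffer.
 **/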
int iio_push_ring_event(struct iio_ring_buffer *ring_buf,
                       int event_code,
                       s64 timestamp)
{
        return __iio_push_event(&ring_buf->ev_int,
                               event_code,
                               timestamp,
                               &ring_buf->shared_ev_pointer);
}
EXPORT_SYMBOL(iio_push_ring_event);

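/**
 * iio_push_or_escallate_ring_event() - escalate or push as appropriate
 * @ring_buf:		ring buffer the event is associated with
 * @event_code:		code identifying the event
 * @timestamp:		time at which the event occurred
 *
 * If an event is already pending on the shared event pointer it is
 * upgraded in place via __iio_change_event(); otherwise a new event is
 * pushed.
 **/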
int iio_push_or_escallate_ring_event(struct iio_ring_buffer *ring_buf,
                                    int event_code,
                                    s64 timestamp)
{
        if (ring_buf->shared_ev_pointer.ev_p)
                __iio_change_event(ring_buf->shared_ev_pointer.ev_p,
                                   event_code,
                                   timestamp);
        else
                return iio_push_ring_event(ring_buf,
                                          event_code,
                                          timestamp);
        return 0;
}
EXPORT_SYMBOL(iio_push_or_escallate_ring_event);

/**
 * iio_ring_open() - chrdev file open for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
int iio_ring_open(struct inode *inode, struct file *filp)
{
        struct iio_handler *hand
                = container_of(inode->i_cdev, struct iio_handler, chrdev);
        struct iio_ring_buffer *rb = hand->private;

        filp->private_data = hand->private;
        if (rb->access.mark_in_use)
                rb->access.mark_in_use(rb);

        return 0;
}

/**
 * iio_ring_release() - chrdev file close for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
int iio_ring_release(struct inode *inode, struct file *filp)
{
        struct cdev *cd = inode->i_cdev;
        struct iio_handler *hand = iio_cdev_to_handler(cd);
        struct iio_ring_buffer *rb = hand->private;

        clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags);
        if (rb->access.unmark_in_use)
                rb->access.unmark_in_use(rb);

        return 0;
}

/**
 * iio_ring_rip_outer() - chrdev read for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
ssize_t iio_ring_rip_outer(struct file *filp,
                           char __user *buf,
                           size_t count,
                           loff_t *f_ps)
{
        struct iio_ring_buffer *rb = filp->private_data;
        int ret, dead_offset, copied;
        u8 *data;
        /* rip_lots must exist. */
        if (!rb->access.rip_lots)
                return -EINVAL;
        copied = rb->access.rip_lots(rb, count, &data, &dead_offset);

        if (copied < 0) {
                ret = copied;
                goto error_ret;
        }
        if (copy_to_user(buf, data + dead_offset, copied)) {
                ret = -EFAULT;
                goto error_free_data_cpy;
        }
        /* In clever ring buffer designs this may not need to be freed.
         * When such a design exists I'll add this to ring access funcs.
         */
        kfree(data);

        return copied;

error_free_data_cpy:
        kfree(data);
error_ret:
        return ret;
}

static const struct file_operations iio_ring_fileops = {
        .read = iio_ring_rip_outer,
        .release = iio_ring_release,
        .open = iio_ring_open,
        .owner = THIS_MODULE,
};

/**
 * __iio_request_ring_buffer_event_chrdev() - allocate ring event chrdev
 * @buf:        ring buffer whose event chrdev we are allocating
 * @id:         id of the ring buffer (currently unused)
 * @owner:      the module who owns the ring buffer (for ref counting)
 * @dev:        device with which the chrdev is associated
 **/
static inline int
__iio_request_ring_buffer_event_chrdev(struct iio_ring_buffer *buf,
                                       int id,
                                       struct module *owner,
                                       struct device *dev)
{
        int ret;
        ret = iio_get_new_idr_val(&iio_ring_event_idr);
        if (ret < 0)
                goto error_ret;
        else
                buf->ev_int.id = ret;

        snprintf(buf->ev_int._name, 20,
                 "ring_event_line%d",
                 buf->ev_int.id);
        ret = iio_setup_ev_int(&(buf->ev_int),
                               buf->ev_int._name,
                               owner,
                               dev);
        if (ret)
                goto error_free_id;
        return 0;

error_free_id:
        iio_free_idr_val(&iio_ring_event_idr, buf->ev_int.id);
error_ret:
        return ret;
}

static inline void
__iio_free_ring_buffer_event_chrdev(struct iio_ring_buffer *buf)
{
        iio_free_ev_int(&(buf->ev_int));
        iio_free_idr_val(&iio_ring_event_idr, buf->ev_int.id);
}

static void iio_ring_access_release(struct device *dev)
{
        struct iio_ring_buffer *buf
                = access_dev_to_iio_ring_buffer(dev);
        cdev_del(&buf->access_handler.chrdev);
        iio_device_free_chrdev_minor(MINOR(dev->devt));
}

static struct device_type iio_ring_access_type = {
        .release = iio_ring_access_release,
};

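/**
 * __iio_request_ring_buffer_access_chrdev() - allocate ring access chrdev
 * @buf:        ring buffer whose access chrdev we are allocating
 * @id:         id of the ring buffer (currently unused)
 * @owner:      the module who owns the ring buffer (for ref counting)
 **/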
static inline int
__iio_request_ring_buffer_access_chrdev(struct iio_ring_buffer *buf,
                                        int id,
                                        struct module *owner)
{
        int ret, minor;

        buf->access_handler.flags = 0;

        buf->access_dev.parent = &buf->dev;
        buf->access_dev.class = &iio_class;
        buf->access_dev.type = &iio_ring_access_type;
        device_initialize(&buf->access_dev);

        minor = iio_device_get_chrdev_minor();
        if (minor < 0) {
                ret = minor;
                goto error_device_put;
        }
        buf->access_dev.devt = MKDEV(MAJOR(iio_devt), minor);

        ret = iio_get_new_idr_val(&iio_ring_access_idr);
        if (ret < 0)
                goto error_device_put;
        else
                buf->access_id = ret;
        dev_set_name(&buf->access_dev, "ring_access%d", buf->access_id);
        ret = device_add(&buf->access_dev);
        if (ret < 0) {
                printk(KERN_ERR "failed to add the ring access dev\n");
                goto error_free_idr;
        }

        cdev_init(&buf->access_handler.chrdev, &iio_ring_fileops);
        buf->access_handler.chrdev.owner = owner;

        ret = cdev_add(&buf->access_handler.chrdev, buf->access_dev.devt, 1);
        if (ret) {
                printk(KERN_ERR "failed to allocate ring access chrdev\n");
                goto error_device_unregister;
        }
        return 0;
error_device_unregister:
        /* device_unregister() drops the reference itself, so do not fall
         * through to the put_device() below.
         */
        iio_free_idr_val(&iio_ring_access_idr, buf->access_id);
        device_unregister(&buf->access_dev);
        return ret;
error_free_idr:
        iio_free_idr_val(&iio_ring_access_idr, buf->access_id);
error_device_put:
        put_device(&buf->access_dev);

        return ret;
}

static void __iio_free_ring_buffer_access_chrdev(struct iio_ring_buffer *buf)
{
        iio_free_idr_val(&iio_ring_access_idr, buf->access_id);
        device_unregister(&buf->access_dev);
}

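/**
 * iio_ring_buffer_init() - basic initialization of a ring buffer
 * @ring:	ring buffer being initialized
 * @dev_info:	iio device the ring buffer belongs to
 *
 * Flags any pending parameter change and wires up the back references
 * used by the event and access handlers.
 **/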
void iio_ring_buffer_init(struct iio_ring_buffer *ring,
                          struct iio_dev *dev_info)
{
        if (ring->access.mark_param_change)
                ring->access.mark_param_change(ring);
        ring->indio_dev = dev_info;
        ring->ev_int.private = ring;
        ring->access_handler.private = ring;
}
EXPORT_SYMBOL(iio_ring_buffer_init);

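/**
 * iio_ring_buffer_register() - register the ring buffer with the IIO core
 * @ring:	ring buffer to register
 *
 * Allocates an id, adds the ring buffer device and creates the associated
 * event and access chrdevs, unwinding each step on failure.
 **/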
int iio_ring_buffer_register(struct iio_ring_buffer *ring)
{
        int ret;
        ret = iio_get_new_idr_val(&iio_ring_idr);
        if (ret < 0)
                goto error_ret;
        else
                ring->id = ret;

        dev_set_name(&ring->dev, "ring_buffer%d", ring->id);
        ret = device_add(&ring->dev);
        if (ret)
                goto error_free_id;

        ret = __iio_request_ring_buffer_event_chrdev(ring,
                                                     0,
                                                     ring->owner,
                                                     &ring->dev);
        if (ret)
                goto error_remove_device;

        ret = __iio_request_ring_buffer_access_chrdev(ring,
                                                      0,
                                                      ring->owner);

        if (ret)
                goto error_free_ring_buffer_event_chrdev;

        return ret;
error_free_ring_buffer_event_chrdev:
        __iio_free_ring_buffer_event_chrdev(ring);
error_remove_device:
        device_del(&ring->dev);
error_free_id:
        iio_free_idr_val(&iio_ring_idr, ring->id);
error_ret:
        return ret;
}
EXPORT_SYMBOL(iio_ring_buffer_register);

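/**
 * iio_ring_buffer_unregister() - reverse the effects of registration
 * @ring:	ring buffer to unregister
 **/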
void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
{
        __iio_free_ring_buffer_access_chrdev(ring);
        __iio_free_ring_buffer_event_chrdev(ring);
        device_del(&ring->dev);
        iio_free_idr_val(&iio_ring_idr, ring->id);
}
EXPORT_SYMBOL(iio_ring_buffer_unregister);

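/**
 * iio_read_ring_length() - sysfs read of the current ring length
 * @dev:	device the attribute belongs to
 * @attr:	the length attribute
 * @buf:	sysfs output buffer
 **/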
ssize_t iio_read_ring_length(struct device *dev,
                             struct device_attribute *attr,
                             char *buf)
{
        int len = 0;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        if (ring->access.get_length)
                len = sprintf(buf, "%d\n",
                              ring->access.get_length(ring));

        return len;
}
EXPORT_SYMBOL(iio_read_ring_length);

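/**
 * iio_write_ring_length() - sysfs write of a new ring length
 * @dev:	device the attribute belongs to
 * @attr:	the length attribute
 * @buf:	sysfs input buffer
 * @len:	length of the input
 *
 * A no-op if the requested length matches the current one; otherwise the
 * new length is set and a parameter change is flagged so implementations
 * can rebuild the buffer before its next use.
 **/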
ssize_t iio_write_ring_length(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf,
                              size_t len)
{
        int ret;
        ulong val;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        ret = strict_strtoul(buf, 10, &val);
        if (ret)
                return ret;

        if (ring->access.get_length)
                if (val == ring->access.get_length(ring))
                        return len;

        if (ring->access.set_length) {
                ring->access.set_length(ring, val);
                if (ring->access.mark_param_change)
                        ring->access.mark_param_change(ring);
        }

        return len;
}
EXPORT_SYMBOL(iio_write_ring_length);

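/**
 * iio_read_ring_bps() - sysfs read of the bytes per datum
 * @dev:	device the attribute belongs to
 * @attr:	the bytes-per-datum attribute
 * @buf:	sysfs output buffer
 *
 * Reports whatever the implementation's get_bpd callback returns.
 **/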
ssize_t iio_read_ring_bps(struct device *dev,
                          struct device_attribute *attr,
                          char *buf)
{
        int len = 0;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        if (ring->access.get_bpd)
                len = sprintf(buf, "%d\n",
                              ring->access.get_bpd(ring));

        return len;
}
EXPORT_SYMBOL(iio_read_ring_bps);

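/**
 * iio_store_ring_enable() - sysfs enable / disable of the ring buffer
 * @dev:	device the attribute belongs to
 * @attr:	the enable attribute
 * @buf:	sysfs input buffer; anything but a leading '0' enables
 * @len:	length of the input
 *
 * Under dev_info->mlock, walks the preenable / parameter update /
 * mode-switch / postenable sequence on enable and the predisable /
 * postdisable sequence on disable, unwinding on failure.
 **/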
ssize_t iio_store_ring_enable(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf,
                              size_t len)
{
        int ret;
        bool requested_state, current_state;
        int previous_mode;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        struct iio_dev *dev_info = ring->indio_dev;

        mutex_lock(&dev_info->mlock);
        previous_mode = dev_info->currentmode;
        requested_state = !(buf[0] == '0');
        current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
        if (current_state == requested_state) {
                printk(KERN_INFO "iio-ring, current state requested again\n");
                goto done;
        }
        if (requested_state) {
                if (ring->preenable) {
                        ret = ring->preenable(dev_info);
                        if (ret) {
                                printk(KERN_ERR
                                       "Buffer not started: "
                                       "ring preenable failed\n");
                                goto error_ret;
                        }
                }
                if (ring->access.request_update) {
                        ret = ring->access.request_update(ring);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: "
                                       "ring parameter update failed\n");
                                goto error_ret;
                        }
                }
                if (ring->access.mark_in_use)
                        ring->access.mark_in_use(ring);
                /* Definitely possible for devices to support both of these. */
                if (dev_info->modes & INDIO_RING_TRIGGERED) {
                        if (!dev_info->trig) {
                                printk(KERN_INFO
                                       "Buffer not started: no trigger\n");
                                ret = -EINVAL;
                                if (ring->access.unmark_in_use)
                                        ring->access.unmark_in_use(ring);
                                goto error_ret;
                        }
                        dev_info->currentmode = INDIO_RING_TRIGGERED;
                } else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER)
                        dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
                else { /* should never be reached */
                        ret = -EINVAL;
                        goto error_ret;
                }

                if (ring->postenable) {
                        ret = ring->postenable(dev_info);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: "
                                       "postenable failed\n");
                                if (ring->access.unmark_in_use)
                                        ring->access.unmark_in_use(ring);
                                dev_info->currentmode = previous_mode;
                                if (ring->postdisable)
                                        ring->postdisable(dev_info);
                                goto error_ret;
                        }
                }
        } else {
                if (ring->predisable) {
                        ret = ring->predisable(dev_info);
                        if (ret)
                                goto error_ret;
                }
                if (ring->access.unmark_in_use)
                        ring->access.unmark_in_use(ring);
                dev_info->currentmode = INDIO_DIRECT_MODE;
                if (ring->postdisable) {
                        ret = ring->postdisable(dev_info);
                        if (ret)
                                goto error_ret;
                }
        }
done:
        mutex_unlock(&dev_info->mlock);
        return len;

error_ret:
        mutex_unlock(&dev_info->mlock);
        return ret;
}
EXPORT_SYMBOL(iio_store_ring_enable);

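/**
 * iio_show_ring_enable() - sysfs read of the current ring enable state
 * @dev:	device the attribute belongs to
 * @attr:	the enable attribute
 * @buf:	sysfs output buffer
 **/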
ssize_t iio_show_ring_enable(struct device *dev,
                             struct device_attribute *attr,
                             char *buf)
{
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", !!(ring->indio_dev->currentmode
                                       & INDIO_ALL_RING_MODES));
}
EXPORT_SYMBOL(iio_show_ring_enable);

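/**
 * iio_scan_el_show() - sysfs read of whether an element is in the scan
 * @dev:	device the attribute belongs to
 * @attr:	the attribute embedded in the relevant iio_scan_el
 * @buf:	sysfs output buffer
 **/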
ssize_t iio_scan_el_show(struct device *dev,
                         struct device_attribute *attr,
                         char *buf)
{
        int ret;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_scan_el *this_el = to_iio_scan_el(attr);

        ret = iio_scan_mask_query(indio_dev, this_el->number);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", ret);
}
EXPORT_SYMBOL(iio_scan_el_show);

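/**
 * iio_scan_el_store() - sysfs add / remove of an element from the scan
 * @dev:	device the attribute belongs to
 * @attr:	the attribute embedded in the relevant iio_scan_el
 * @buf:	sysfs input buffer; anything but a leading '0' enables
 * @len:	length of the input
 *
 * Refuses to change the scan mask while the device is in a triggered
 * ring mode, and keeps scan_count in step with the mask.
 **/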
ssize_t iio_scan_el_store(struct device *dev,
                          struct device_attribute *attr,
                          const char *buf,
                          size_t len)
{
        int ret = 0;
        bool state;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_scan_el *this_el = to_iio_scan_el(attr);

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        ret = iio_scan_mask_query(indio_dev, this_el->number);
        if (ret < 0)
                goto error_ret;
        if (!state && ret) {
                ret = iio_scan_mask_clear(indio_dev, this_el->number);
                if (ret)
                        goto error_ret;
                indio_dev->scan_count--;
        } else if (state && !ret) {
                ret = iio_scan_mask_set(indio_dev, this_el->number);
                if (ret)
                        goto error_ret;
                indio_dev->scan_count++;
        }
        if (this_el->set_state)
                ret = this_el->set_state(this_el, indio_dev, state);
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_store);

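/**
 * iio_scan_el_ts_show() - sysfs read of whether the timestamp is scanned
 * @dev:	device the attribute belongs to
 * @attr:	the timestamp attribute
 * @buf:	sysfs output buffer
 **/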
ssize_t iio_scan_el_ts_show(struct device *dev,
                            struct device_attribute *attr,
                            char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", indio_dev->scan_timestamp);
}
EXPORT_SYMBOL(iio_scan_el_ts_show);
547
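/**
 * iio_scan_el_ts_store() - sysfs control of whether the timestamp is scanned
 * @dev:	device the attribute belongs to
 * @attr:	the timestamp attribute
 * @buf:	sysfs input buffer; anything but a leading '0' enables
 * @len:	length of the input
 *
 * As with the scan elements, refused while in a triggered ring mode.
 **/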
ssize_t iio_scan_el_ts_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf,
                             size_t len)
{
        int ret = 0;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        bool state;
        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        indio_dev->scan_timestamp = state;
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_ts_store);