drivers/staging/iio/industrialio-trigger.c
1 /* The industrial I/O core, trigger handling functions
2  *
3  * Copyright (c) 2008 Jonathan Cameron
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published by
7  * the Free Software Foundation.
8  */
9
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/idr.h>
13 #include <linux/err.h>
14 #include <linux/device.h>
15 #include <linux/interrupt.h>
16 #include <linux/list.h>
17 #include <linux/slab.h>
18
19 #include "iio.h"
20 #include "trigger.h"
21 #include "iio_core.h"
22 #include "trigger_consumer.h"
23
/* RFC - Question of approach
 * Make the common case (single sensor, single trigger)
 * simple by starting trigger capture when the first sensor
 * is added.
 *
 * Complex simultaneous start requires use of the 'hold'
 * functionality of the trigger. (not implemented)
 *
 * Any other suggestions?
 */
34
35 static DEFINE_IDR(iio_trigger_idr);
36 static DEFINE_SPINLOCK(iio_trigger_idr_lock);
37
38 /* Single list of all available triggers */
39 static LIST_HEAD(iio_trigger_list);
40 static DEFINE_MUTEX(iio_trigger_list_lock);
41
42 /**
43  * iio_trigger_read_name() - retrieve useful identifying name
44  **/
45 static ssize_t iio_trigger_read_name(struct device *dev,
46                                      struct device_attribute *attr,
47                                      char *buf)
48 {
49         struct iio_trigger *trig = dev_get_drvdata(dev);
50         return sprintf(buf, "%s\n", trig->name);
51 }
52
53 static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
54
/**
 * iio_trigger_register_sysfs() - add the sysfs interface for this trigger
 * @trig_info:  the trigger
 *
 * Adds the standard "name" attribute for the trigger.
 **/
61 static int iio_trigger_register_sysfs(struct iio_trigger *trig_info)
62 {
63         return sysfs_add_file_to_group(&trig_info->dev.kobj,
64                                        &dev_attr_name.attr,
65                                        NULL);
66 }
67
68 static void iio_trigger_unregister_sysfs(struct iio_trigger *trig_info)
69 {
70         sysfs_remove_file_from_group(&trig_info->dev.kobj,
71                                            &dev_attr_name.attr,
72                                            NULL);
73 }
74
75
76 /**
77  * iio_trigger_register_id() - get a unique id for this trigger
78  * @trig_info:  the trigger
79  **/
80 static int iio_trigger_register_id(struct iio_trigger *trig_info)
81 {
82         int ret = 0;
83
84 idr_again:
85         if (unlikely(idr_pre_get(&iio_trigger_idr, GFP_KERNEL) == 0))
86                 return -ENOMEM;
87
88         spin_lock(&iio_trigger_idr_lock);
89         ret = idr_get_new(&iio_trigger_idr, NULL, &trig_info->id);
90         spin_unlock(&iio_trigger_idr_lock);
91         if (unlikely(ret == -EAGAIN))
92                 goto idr_again;
93         else if (likely(!ret))
94                 trig_info->id = trig_info->id & MAX_ID_MASK;
95
96         return ret;
97 }
98
99 /**
100  * iio_trigger_unregister_id() - free up unique id for use by another trigger
101  * @trig_info: the trigger
102  **/
103 static void iio_trigger_unregister_id(struct iio_trigger *trig_info)
104 {
105         spin_lock(&iio_trigger_idr_lock);
106         idr_remove(&iio_trigger_idr, trig_info->id);
107         spin_unlock(&iio_trigger_idr_lock);
108 }
109
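/**
 * iio_trigger_register() - register a trigger with the IIO core
 * @trig_info:  the trigger
 *
 * Allocates an id for the trigger, adds the underlying device and its
 * sysfs interface and places the trigger on the core's list of available
 * triggers.  Returns 0 on success or a negative error code.
 **/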
110 int iio_trigger_register(struct iio_trigger *trig_info)
111 {
112         int ret;
113
114         ret = iio_trigger_register_id(trig_info);
115         if (ret)
116                 goto error_ret;
117         /* Set the name used for the sysfs directory etc */
118         dev_set_name(&trig_info->dev, "trigger%ld",
119                      (unsigned long) trig_info->id);
120
121         ret = device_add(&trig_info->dev);
122         if (ret)
123                 goto error_unregister_id;
124
125         ret = iio_trigger_register_sysfs(trig_info);
126         if (ret)
127                 goto error_device_del;
128
129         /* Add to list of available triggers held by the IIO core */
130         mutex_lock(&iio_trigger_list_lock);
131         list_add_tail(&trig_info->list, &iio_trigger_list);
132         mutex_unlock(&iio_trigger_list_lock);
133
134         return 0;
135
136 error_device_del:
137         device_del(&trig_info->dev);
138 error_unregister_id:
139         iio_trigger_unregister_id(trig_info);
140 error_ret:
141         return ret;
142 }
143 EXPORT_SYMBOL(iio_trigger_register);
144
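/**
 * iio_trigger_unregister() - remove a trigger from the IIO core
 * @trig_info:  the trigger
 *
 * Reverses iio_trigger_register(): removes the trigger from the list of
 * available triggers, tears down its sysfs interface, releases its id and
 * unregisters the underlying device.
 **/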
145 void iio_trigger_unregister(struct iio_trigger *trig_info)
146 {
147         mutex_lock(&iio_trigger_list_lock);
148         list_del(&trig_info->list);
149         mutex_unlock(&iio_trigger_list_lock);
150
151         iio_trigger_unregister_sysfs(trig_info);
152         iio_trigger_unregister_id(trig_info);
153         /* Possible issue in here */
154         device_unregister(&trig_info->dev);
155 }
156 EXPORT_SYMBOL(iio_trigger_unregister);
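
/* Typical registration sequence for a trigger driver (illustrative sketch
 * only - the "foo" names below are hypothetical, not part of this file):
 *
 *	trig = iio_allocate_trigger("foo-dev%d", id);
 *	if (trig == NULL)
 *		return -ENOMEM;
 *	trig->ops = &foo_trigger_ops;
 *	ret = iio_trigger_register(trig);
 *	if (ret)
 *		iio_free_trigger(trig);
 */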
157
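/**
 * iio_trigger_find_by_name() - look up a registered trigger by name
 * @name:       name to match (sysfs_streq() ignores a trailing newline)
 * @len:        length of @name (currently unused)
 *
 * Returns the matching trigger or NULL if none is found.  Note that no
 * reference is taken on the returned trigger.
 **/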
158 static struct iio_trigger *iio_trigger_find_by_name(const char *name,
159                                                     size_t len)
160 {
161         struct iio_trigger *trig = NULL, *iter;
162
163         mutex_lock(&iio_trigger_list_lock);
164         list_for_each_entry(iter, &iio_trigger_list, list)
165                 if (sysfs_streq(iter->name, name)) {
166                         trig = iter;
167                         break;
168                 }
169         mutex_unlock(&iio_trigger_list_lock);
170
171         return trig;
172 }
173
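/**
 * iio_trigger_poll() - called on a trigger occurring
 * @trig:       the trigger
 * @time:       timestamp of the trigger event (currently unused here)
 *
 * If no poll is already in flight (use_count is zero), fires the sub irq
 * of every enabled consumer attached to this trigger.  Intended to be
 * called from interrupt context.
 **/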
174 void iio_trigger_poll(struct iio_trigger *trig, s64 time)
175 {
176         int i;
177         if (!trig->use_count) {
178                 for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
179                         if (trig->subirqs[i].enabled) {
180                                 trig->use_count++;
181                                 generic_handle_irq(trig->subirq_base + i);
182                         }
183         }
184 }
185 EXPORT_SYMBOL(iio_trigger_poll);
186
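/*
 * Generic hard irq handler for "data ready" style interrupts: simply polls
 * the trigger with the current timestamp.
 */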
187 irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
188 {
189         iio_trigger_poll(private, iio_get_time_ns());
190         return IRQ_HANDLED;
191 }
192 EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
193
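/*
 * Variant of iio_trigger_poll() for use where the consumer handlers must
 * run in a threaded (nested) context; uses handle_nested_irq() rather than
 * generic_handle_irq().
 */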
194 void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time)
195 {
196         int i;
197         if (!trig->use_count) {
198                 for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
199                         if (trig->subirqs[i].enabled) {
200                                 trig->use_count++;
201                                 handle_nested_irq(trig->subirq_base + i);
202                         }
203         }
204 }
205 EXPORT_SYMBOL(iio_trigger_poll_chained);
206
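/**
 * iio_trigger_notify_done() - inform the trigger that one consumer is done
 * @trig:       the trigger
 *
 * Drops the use count taken in iio_trigger_poll().  Once the last consumer
 * has finished, the trigger's try_reenable() callback (if any) is given a
 * chance to rearm; if it reports a missed event a new poll is launched
 * immediately.
 **/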
207 void iio_trigger_notify_done(struct iio_trigger *trig)
208 {
209         trig->use_count--;
210         if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable)
211                 if (trig->ops->try_reenable(trig)) {
                        /* Missed an interrupt so launch a new poll now */
213                         iio_trigger_poll(trig, 0);
214                 }
215 }
216 EXPORT_SYMBOL(iio_trigger_notify_done);
217
218 /* Trigger Consumer related functions */
219
220 /* Complexity in here.  With certain triggers (datardy) an acknowledgement
221  * may be needed if the pollfuncs do not include the data read for the
222  * triggering device.
 * This is not currently handled.  An alternative, possibly the better option,
 * would be not to enable the trigger unless the relevant function is present.
 */
/* Worth protecting against double additions? */
227 int iio_trigger_attach_poll_func(struct iio_trigger *trig,
228                                  struct iio_poll_func *pf)
229 {
230         int ret = 0;
231         bool notinuse
232                 = bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
233
        pf->irq = iio_trigger_get_irq(trig);
        ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
                                   pf->type, pf->name,
                                   pf);
        if (ret < 0) {
                /* Return the unused sub irq to the pool on failure */
                iio_trigger_put_irq(trig, pf->irq);
                return ret;
        }

        if (trig->ops && trig->ops->set_trigger_state && notinuse)
                ret = trig->ops->set_trigger_state(trig, true);
240
241         return ret;
242 }
243 EXPORT_SYMBOL(iio_trigger_attach_poll_func);
244
245 int iio_trigger_dettach_poll_func(struct iio_trigger *trig,
246                                   struct iio_poll_func *pf)
247 {
248         int ret = 0;
249         bool no_other_users
250                 = (bitmap_weight(trig->pool,
251                                  CONFIG_IIO_CONSUMERS_PER_TRIGGER)
252                    == 1);
253         if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
254                 ret = trig->ops->set_trigger_state(trig, false);
255                 if (ret)
256                         goto error_ret;
257         }
258         iio_trigger_put_irq(trig, pf->irq);
259         free_irq(pf->irq, pf);
260
261 error_ret:
262         return ret;
263 }
264 EXPORT_SYMBOL(iio_trigger_dettach_poll_func);
265
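/**
 * iio_pollfunc_store_time() - generic top half for a poll function
 * @irq:        the sub irq that fired
 * @p:          the struct iio_poll_func
 *
 * Records a timestamp for the event and wakes the threaded bottom half.
 **/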
266 irqreturn_t iio_pollfunc_store_time(int irq, void *p)
267 {
268         struct iio_poll_func *pf = p;
269         pf->timestamp = iio_get_time_ns();
270         return IRQ_WAKE_THREAD;
271 }
272 EXPORT_SYMBOL(iio_pollfunc_store_time);
273
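/**
 * iio_alloc_pollfunc() - allocate and populate a poll function structure
 * @h:          top half (hard irq) handler
 * @thread:     threaded bottom half handler
 * @type:       irq flags passed to request_threaded_irq()
 * @private:    driver private data made available to the handlers
 * @fmt:        printf style format string used to name the poll function
 *
 * Returns the new poll function, or NULL on allocation failure.
 **/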
274 struct iio_poll_func
275 *iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
276                     irqreturn_t (*thread)(int irq, void *p),
277                     int type,
278                     void *private,
279                     const char *fmt,
280                     ...)
281 {
282         va_list vargs;
283         struct iio_poll_func *pf;
284
285         pf = kmalloc(sizeof *pf, GFP_KERNEL);
286         if (pf == NULL)
287                 return NULL;
288         va_start(vargs, fmt);
289         pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
290         va_end(vargs);
291         if (pf->name == NULL) {
292                 kfree(pf);
293                 return NULL;
294         }
295         pf->h = h;
296         pf->thread = thread;
297         pf->type = type;
298         pf->private_data = private;
299
300         return pf;
301 }
302 EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);
303
304 void iio_dealloc_pollfunc(struct iio_poll_func *pf)
305 {
306         kfree(pf->name);
307         kfree(pf);
308 }
309 EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
310
311 /**
 * iio_trigger_read_current() - trigger consumer sysfs query of current trigger
313  *
314  * For trigger consumers the current_trigger interface allows the trigger
315  * used by the device to be queried.
316  **/
317 static ssize_t iio_trigger_read_current(struct device *dev,
318                                         struct device_attribute *attr,
319                                         char *buf)
320 {
321         struct iio_dev *dev_info = dev_get_drvdata(dev);
322         int len = 0;
323         if (dev_info->trig)
324                 len = sprintf(buf,
325                               "%s\n",
326                               dev_info->trig->name);
327         return len;
328 }
329
330 /**
 * iio_trigger_write_current() - trigger consumer sysfs set current trigger
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used for this device to be specified at run time based on the trigger's
 * name.
336  **/
337 static ssize_t iio_trigger_write_current(struct device *dev,
338                                          struct device_attribute *attr,
339                                          const char *buf,
340                                          size_t len)
341 {
342         struct iio_dev *dev_info = dev_get_drvdata(dev);
343         struct iio_trigger *oldtrig = dev_info->trig;
344         struct iio_trigger *trig;
345         int ret;
346
347         mutex_lock(&dev_info->mlock);
348         if (dev_info->currentmode == INDIO_RING_TRIGGERED) {
349                 mutex_unlock(&dev_info->mlock);
350                 return -EBUSY;
351         }
352         mutex_unlock(&dev_info->mlock);
353
354         trig = iio_trigger_find_by_name(buf, len);
355
356         if (trig && dev_info->info->validate_trigger) {
357                 ret = dev_info->info->validate_trigger(dev_info, trig);
358                 if (ret)
359                         return ret;
360         }
361
362         if (trig && trig->ops && trig->ops->validate_device) {
363                 ret = trig->ops->validate_device(trig, dev_info);
364                 if (ret)
365                         return ret;
366         }
367
368         dev_info->trig = trig;
369
370         if (oldtrig && dev_info->trig != oldtrig)
371                 iio_put_trigger(oldtrig);
372         if (dev_info->trig)
373                 iio_get_trigger(dev_info->trig);
374
375         return len;
376 }
377
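/* From userspace the trigger is selected by writing its name to the
 * consumer's current_trigger attribute, for example (actual device and
 * trigger names depend on the system):
 *
 *	echo trigger0 > /sys/bus/iio/devices/device0/trigger/current_trigger
 */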
378 static DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR,
379                    iio_trigger_read_current,
380                    iio_trigger_write_current);
381
382 static struct attribute *iio_trigger_consumer_attrs[] = {
383         &dev_attr_current_trigger.attr,
384         NULL,
385 };
386
387 static const struct attribute_group iio_trigger_consumer_attr_group = {
388         .name = "trigger",
389         .attrs = iio_trigger_consumer_attrs,
390 };
391
392 static void iio_trig_release(struct device *device)
393 {
394         struct iio_trigger *trig = to_iio_trigger(device);
395         int i;
396
397         if (trig->subirq_base) {
398                 for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
399                         irq_modify_status(trig->subirq_base + i,
400                                           IRQ_NOAUTOEN,
401                                           IRQ_NOREQUEST | IRQ_NOPROBE);
402                         irq_set_chip(trig->subirq_base + i,
403                                      NULL);
404                         irq_set_handler(trig->subirq_base + i,
405                                         NULL);
406                 }
407
408                 irq_free_descs(trig->subirq_base,
409                                CONFIG_IIO_CONSUMERS_PER_TRIGGER);
410         }
411         kfree(trig->name);
412         kfree(trig);
413         iio_put();
414 }
415
416 static struct device_type iio_trig_type = {
417         .release = iio_trig_release,
418 };
419
420 static void iio_trig_subirqmask(struct irq_data *d)
421 {
422         struct irq_chip *chip = irq_data_get_irq_chip(d);
423         struct iio_trigger *trig
424                 = container_of(chip,
425                                struct iio_trigger, subirq_chip);
426         trig->subirqs[d->irq - trig->subirq_base].enabled = false;
427 }
428
429 static void iio_trig_subirqunmask(struct irq_data *d)
430 {
431         struct irq_chip *chip = irq_data_get_irq_chip(d);
432         struct iio_trigger *trig
433                 = container_of(chip,
434                                struct iio_trigger, subirq_chip);
435         trig->subirqs[d->irq - trig->subirq_base].enabled = true;
436 }
437
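/**
 * iio_allocate_trigger() - allocate a trigger and its consumer sub irqs
 * @fmt:        printf style format string used to name the trigger
 *
 * Allocates the trigger structure, a block of CONFIG_IIO_CONSUMERS_PER_TRIGGER
 * irq descriptors for the trigger's consumers and the trigger's name.
 * Returns the trigger, or NULL on failure.
 **/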
438 struct iio_trigger *iio_allocate_trigger(const char *fmt, ...)
439 {
440         va_list vargs;
441         struct iio_trigger *trig;
442         trig = kzalloc(sizeof *trig, GFP_KERNEL);
443         if (trig) {
444                 int i;
445                 trig->dev.type = &iio_trig_type;
446                 trig->dev.bus = &iio_bus_type;
447                 device_initialize(&trig->dev);
448                 dev_set_drvdata(&trig->dev, (void *)trig);
449
450                 mutex_init(&trig->pool_lock);
451                 trig->subirq_base
452                         = irq_alloc_descs(-1, 0,
453                                           CONFIG_IIO_CONSUMERS_PER_TRIGGER,
454                                           0);
455                 if (trig->subirq_base < 0) {
456                         kfree(trig);
457                         return NULL;
458                 }
459                 va_start(vargs, fmt);
460                 trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
461                 va_end(vargs);
462                 if (trig->name == NULL) {
463                         irq_free_descs(trig->subirq_base,
464                                        CONFIG_IIO_CONSUMERS_PER_TRIGGER);
465                         kfree(trig);
466                         return NULL;
467                 }
468                 trig->subirq_chip.name = trig->name;
469                 trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
470                 trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
471                 for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
472                         irq_set_chip(trig->subirq_base + i,
473                                      &trig->subirq_chip);
474                         irq_set_handler(trig->subirq_base + i,
475                                         &handle_simple_irq);
476                         irq_modify_status(trig->subirq_base + i,
477                                           IRQ_NOREQUEST | IRQ_NOAUTOEN,
478                                           IRQ_NOPROBE);
479                 }
480                 iio_get();
481         }
482         return trig;
483 }
484 EXPORT_SYMBOL(iio_allocate_trigger);
485
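/**
 * iio_free_trigger() - drop the reference taken in iio_allocate_trigger()
 * @trig:       the trigger (may be NULL)
 *
 * The memory is actually released from iio_trig_release() once the last
 * reference to the embedded device has gone.
 **/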
486 void iio_free_trigger(struct iio_trigger *trig)
487 {
488         if (trig)
489                 put_device(&trig->dev);
490 }
491 EXPORT_SYMBOL(iio_free_trigger);
492
493 int iio_device_register_trigger_consumer(struct iio_dev *dev_info)
494 {
495         int ret;
496         ret = sysfs_create_group(&dev_info->dev.kobj,
497                                  &iio_trigger_consumer_attr_group);
498         return ret;
499 }
500 EXPORT_SYMBOL(iio_device_register_trigger_consumer);
501
502 int iio_device_unregister_trigger_consumer(struct iio_dev *dev_info)
503 {
504         sysfs_remove_group(&dev_info->dev.kobj,
505                            &iio_trigger_consumer_attr_group);
506         return 0;
507 }
508 EXPORT_SYMBOL(iio_device_unregister_trigger_consumer);
509
510 int iio_triggered_ring_postenable(struct iio_dev *indio_dev)
511 {
512         return indio_dev->trig
513                 ? iio_trigger_attach_poll_func(indio_dev->trig,
514                                                indio_dev->pollfunc)
515                 : 0;
516 }
517 EXPORT_SYMBOL(iio_triggered_ring_postenable);
518
519 int iio_triggered_ring_predisable(struct iio_dev *indio_dev)
520 {
521         return indio_dev->trig
522                 ? iio_trigger_dettach_poll_func(indio_dev->trig,
523                                                 indio_dev->pollfunc)
524                 : 0;
525 }
526 EXPORT_SYMBOL(iio_triggered_ring_predisable);