/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/idr.h>
15 #include <linux/kdev_t.h>
16 #include <linux/err.h>
17 #include <linux/device.h>
19 #include <linux/interrupt.h>
20 #include <linux/poll.h>
21 #include <linux/sched.h>
22 #include <linux/wait.h>
23 #include <linux/cdev.h>
24 #include <linux/slab.h>
26 #include "trigger_consumer.h"
28 #define IIO_ID_PREFIX "device"
29 #define IIO_ID_FORMAT IIO_ID_PREFIX "%d"
31 /* IDR to assign each registered device a unique id*/
32 static DEFINE_IDA(iio_ida);
33 /* IDR to allocate character device minor numbers */
34 static DEFINE_IDA(iio_chrdev_ida);
35 /* Lock used to protect both of the above */
36 static DEFINE_SPINLOCK(iio_ida_lock);
39 EXPORT_SYMBOL(iio_devt);
41 #define IIO_DEV_MAX 256
42 struct bus_type iio_bus_type = {
45 EXPORT_SYMBOL(iio_bus_type);
/*
 * Lookup tables used when building sysfs attribute names from a
 * struct iio_chan_spec: channel-type names, per-modifier names, and the
 * info_mask postfixes (which rely on shared/separate pairs, hence the /2).
 * NOTE(review): this listing is missing lines - several array entries and
 * the closing "};" of each table are absent. Restore from the upstream
 * industrialio-core.c before building; do not hand-guess the enum order.
 */
47 static const char * const iio_chan_type_name_spec_shared[] = {
48 [IIO_TIMESTAMP] = "timestamp",
49 [IIO_ACCEL] = "accel",
51 [IIO_IN_DIFF] = "in-in",
55 [IIO_INCLI] = "incli",
57 [IIO_INTENSITY] = "intensity",
58 [IIO_LIGHT] = "illuminance",
62 static const char * const iio_chan_type_name_spec_complex[] = {
63 [IIO_IN_DIFF] = "in%d-in%d",
66 static const char * const iio_modifier_names_light[] = {
67 [IIO_MOD_LIGHT_BOTH] = "both",
68 [IIO_MOD_LIGHT_IR] = "ir",
71 static const char * const iio_modifier_names_axial[] = {
77 /* relies on pairs of these shared then separate */
78 static const char * const iio_chan_info_postfix[] = {
79 [IIO_CHAN_INFO_SCALE_SHARED/2] = "scale",
80 [IIO_CHAN_INFO_OFFSET_SHARED/2] = "offset",
81 [IIO_CHAN_INFO_CALIBSCALE_SHARED/2] = "calibscale",
82 [IIO_CHAN_INFO_CALIBBIAS_SHARED/2] = "calibbias",
/*
 * __iio_push_event() queues a detected event onto an event interface's
 * list and wakes any chrdev reader; iio_push_event() is the per-line
 * convenience wrapper drivers call.
 * NOTE(review): the listing has dropped lines here - the full parameter
 * lists (event code, timestamp), the kmalloc NULL check, the early-return
 * when no listener / queue full, and both closing braces are missing.
 * The visible locking pattern (mutex held across test_bit/list_add_tail,
 * dropped before wake_up_interruptible) matches upstream; restore the rest
 * from the upstream industrialio-core.c rather than reconstructing by hand.
 */
85 /* Used both in the interrupt line put events and the ring buffer ones */
87 /* Note that in it's current form someone has to be listening before events
88 * are queued. Hence a client MUST open the chrdev before the ring buffer is
91 int __iio_push_event(struct iio_event_interface *ev_int,
95 struct iio_detected_event_list *ev;
98 /* Does anyone care? */
99 mutex_lock(&ev_int->event_list_lock);
100 if (test_bit(IIO_BUSY_BIT_POS, &ev_int->handler.flags)) {
101 if (ev_int->current_events == ev_int->max_events) {
102 mutex_unlock(&ev_int->event_list_lock);
105 ev = kmalloc(sizeof(*ev), GFP_KERNEL);
108 mutex_unlock(&ev_int->event_list_lock);
112 ev->ev.timestamp = timestamp;
114 list_add_tail(&ev->list, &ev_int->det_events.list);
115 ev_int->current_events++;
116 mutex_unlock(&ev_int->event_list_lock);
117 wake_up_interruptible(&ev_int->wait);
119 mutex_unlock(&ev_int->event_list_lock);
124 EXPORT_SYMBOL(__iio_push_event);
126 int iio_push_event(struct iio_dev *dev_info,
131 return __iio_push_event(&dev_info->event_interfaces[ev_line],
134 EXPORT_SYMBOL(iio_push_event);
136 /* Generic interrupt line interrupt handler */
137 static irqreturn_t iio_interrupt_handler(int irq, void *_int_info)
139 struct iio_interrupt *int_info = _int_info;
140 struct iio_dev *dev_info = int_info->dev_info;
141 struct iio_event_handler_list *p;
145 spin_lock_irqsave(&int_info->ev_list_lock, flags);
146 if (list_empty(&int_info->ev_list)) {
147 spin_unlock_irqrestore(&int_info->ev_list_lock, flags);
151 time_ns = iio_get_time_ns();
152 list_for_each_entry(p, &int_info->ev_list, list) {
153 disable_irq_nosync(irq);
154 p->handler(dev_info, 1, time_ns, !(p->refcount > 1));
156 spin_unlock_irqrestore(&int_info->ev_list_lock, flags);
161 static struct iio_interrupt *iio_allocate_interrupt(void)
163 struct iio_interrupt *i = kmalloc(sizeof *i, GFP_KERNEL);
165 spin_lock_init(&i->ev_list_lock);
166 INIT_LIST_HEAD(&i->ev_list);
171 /* Confirming the validity of supplied irq is left to drivers.*/
172 int iio_register_interrupt_line(unsigned int irq,
173 struct iio_dev *dev_info,
180 dev_info->interrupts[line_number] = iio_allocate_interrupt();
181 if (dev_info->interrupts[line_number] == NULL) {
185 dev_info->interrupts[line_number]->line_number = line_number;
186 dev_info->interrupts[line_number]->irq = irq;
187 dev_info->interrupts[line_number]->dev_info = dev_info;
189 /* Possibly only request on demand?
190 * Can see this may complicate the handling of interrupts.
191 * However, with this approach we might end up handling lots of
192 * events no-one cares about.*/
193 ret = request_irq(irq,
194 &iio_interrupt_handler,
197 dev_info->interrupts[line_number]);
202 EXPORT_SYMBOL(iio_register_interrupt_line);
204 /* This turns up an awful lot */
205 ssize_t iio_read_const_attr(struct device *dev,
206 struct device_attribute *attr,
209 return sprintf(buf, "%s\n", to_iio_const_attr(attr)->string);
211 EXPORT_SYMBOL(iio_read_const_attr);
213 /* Before this runs the interrupt generator must have been disabled */
214 void iio_unregister_interrupt_line(struct iio_dev *dev_info, int line_number)
216 /* make sure the interrupt handlers are all done */
217 flush_scheduled_work();
218 free_irq(dev_info->interrupts[line_number]->irq,
219 dev_info->interrupts[line_number]);
220 kfree(dev_info->interrupts[line_number]);
222 EXPORT_SYMBOL(iio_unregister_interrupt_line);
224 /* Reference counted add and remove */
225 void iio_add_event_to_list(struct iio_event_handler_list *el,
226 struct list_head *head)
229 struct iio_interrupt *inter = to_iio_interrupt(head);
231 /* take mutex to protect this element */
232 mutex_lock(&el->exist_lock);
233 if (el->refcount == 0) {
234 /* Take the event list spin lock */
235 spin_lock_irqsave(&inter->ev_list_lock, flags);
236 list_add(&el->list, head);
237 spin_unlock_irqrestore(&inter->ev_list_lock, flags);
240 mutex_unlock(&el->exist_lock);
242 EXPORT_SYMBOL(iio_add_event_to_list);
244 void iio_remove_event_from_list(struct iio_event_handler_list *el,
245 struct list_head *head)
248 struct iio_interrupt *inter = to_iio_interrupt(head);
250 mutex_lock(&el->exist_lock);
252 if (el->refcount == 0) {
253 /* Take the event list spin lock */
254 spin_lock_irqsave(&inter->ev_list_lock, flags);
255 list_del_init(&el->list);
256 spin_unlock_irqrestore(&inter->ev_list_lock, flags);
258 mutex_unlock(&el->exist_lock);
260 EXPORT_SYMBOL(iio_remove_event_from_list);
262 static ssize_t iio_event_chrdev_read(struct file *filep,
267 struct iio_event_interface *ev_int = filep->private_data;
268 struct iio_detected_event_list *el;
272 mutex_lock(&ev_int->event_list_lock);
273 if (list_empty(&ev_int->det_events.list)) {
274 if (filep->f_flags & O_NONBLOCK) {
276 goto error_mutex_unlock;
278 mutex_unlock(&ev_int->event_list_lock);
279 /* Blocking on device; waiting for something to be there */
280 ret = wait_event_interruptible(ev_int->wait,
285 /* Single access device so no one else can get the data */
286 mutex_lock(&ev_int->event_list_lock);
289 el = list_first_entry(&ev_int->det_events.list,
290 struct iio_detected_event_list,
293 if (copy_to_user(buf, &(el->ev), len)) {
295 goto error_mutex_unlock;
298 ev_int->current_events--;
299 mutex_unlock(&ev_int->event_list_lock);
305 mutex_unlock(&ev_int->event_list_lock);
311 static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
313 struct iio_handler *hand = iio_cdev_to_handler(inode->i_cdev);
314 struct iio_event_interface *ev_int = hand->private;
315 struct iio_detected_event_list *el, *t;
317 mutex_lock(&ev_int->event_list_lock);
318 clear_bit(IIO_BUSY_BIT_POS, &ev_int->handler.flags);
320 * In order to maintain a clean state for reopening,
321 * clear out any awaiting events. The mask will prevent
322 * any new __iio_push_event calls running.
324 list_for_each_entry_safe(el, t, &ev_int->det_events.list, list) {
328 mutex_unlock(&ev_int->event_list_lock);
333 static int iio_event_chrdev_open(struct inode *inode, struct file *filep)
335 struct iio_handler *hand = iio_cdev_to_handler(inode->i_cdev);
336 struct iio_event_interface *ev_int = hand->private;
338 mutex_lock(&ev_int->event_list_lock);
339 if (test_and_set_bit(IIO_BUSY_BIT_POS, &hand->flags)) {
340 fops_put(filep->f_op);
341 mutex_unlock(&ev_int->event_list_lock);
344 filep->private_data = hand->private;
345 mutex_unlock(&ev_int->event_list_lock);
350 static const struct file_operations iio_event_chrdev_fileops = {
351 .read = iio_event_chrdev_read,
352 .release = iio_event_chrdev_release,
353 .open = iio_event_chrdev_open,
354 .owner = THIS_MODULE,
355 .llseek = noop_llseek,
358 static void iio_event_dev_release(struct device *dev)
360 struct iio_event_interface *ev_int
361 = container_of(dev, struct iio_event_interface, dev);
362 cdev_del(&ev_int->handler.chrdev);
363 iio_device_free_chrdev_minor(MINOR(dev->devt));
366 static struct device_type iio_event_type = {
367 .release = iio_event_dev_release,
370 int iio_device_get_chrdev_minor(void)
375 if (unlikely(ida_pre_get(&iio_chrdev_ida, GFP_KERNEL) == 0))
377 spin_lock(&iio_ida_lock);
378 ret = ida_get_new(&iio_chrdev_ida, &val);
379 spin_unlock(&iio_ida_lock);
380 if (unlikely(ret == -EAGAIN))
382 else if (unlikely(ret))
384 if (val > IIO_DEV_MAX)
389 void iio_device_free_chrdev_minor(int val)
391 spin_lock(&iio_ida_lock);
392 ida_remove(&iio_chrdev_ida, val);
393 spin_unlock(&iio_ida_lock);
396 int iio_setup_ev_int(struct iio_event_interface *ev_int,
398 struct module *owner,
403 ev_int->dev.bus = &iio_bus_type;
404 ev_int->dev.parent = dev;
405 ev_int->dev.type = &iio_event_type;
406 device_initialize(&ev_int->dev);
408 minor = iio_device_get_chrdev_minor();
411 goto error_device_put;
413 ev_int->dev.devt = MKDEV(MAJOR(iio_devt), minor);
414 dev_set_name(&ev_int->dev, "%s", name);
416 ret = device_add(&ev_int->dev);
418 goto error_free_minor;
420 cdev_init(&ev_int->handler.chrdev, &iio_event_chrdev_fileops);
421 ev_int->handler.chrdev.owner = owner;
423 mutex_init(&ev_int->event_list_lock);
424 /* discussion point - make this variable? */
425 ev_int->max_events = 10;
426 ev_int->current_events = 0;
427 INIT_LIST_HEAD(&ev_int->det_events.list);
428 init_waitqueue_head(&ev_int->wait);
429 ev_int->handler.private = ev_int;
430 ev_int->handler.flags = 0;
432 ret = cdev_add(&ev_int->handler.chrdev, ev_int->dev.devt, 1);
434 goto error_unreg_device;
439 device_unregister(&ev_int->dev);
441 iio_device_free_chrdev_minor(minor);
443 put_device(&ev_int->dev);
448 void iio_free_ev_int(struct iio_event_interface *ev_int)
450 device_unregister(&ev_int->dev);
451 put_device(&ev_int->dev);
454 static int __init iio_dev_init(void)
458 err = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
460 printk(KERN_ERR "%s: failed to allocate char dev region\n",
466 static void __exit iio_dev_exit(void)
469 unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
472 static int __init iio_init(void)
476 /* Register sysfs bus */
477 ret = bus_register(&iio_bus_type);
480 "%s could not register bus type\n",
485 ret = iio_dev_init();
487 goto error_unregister_bus_type;
491 error_unregister_bus_type:
492 bus_unregister(&iio_bus_type);
497 static void __exit iio_exit(void)
500 bus_unregister(&iio_bus_type);
503 static ssize_t iio_read_channel_info(struct device *dev,
504 struct device_attribute *attr,
507 struct iio_dev *indio_dev = dev_get_drvdata(dev);
508 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
510 int ret = indio_dev->read_raw(indio_dev, this_attr->c,
511 &val, &val2, this_attr->address);
516 if (ret == IIO_VAL_INT)
517 return sprintf(buf, "%d\n", val);
518 else if (ret == IIO_VAL_INT_PLUS_MICRO) {
520 return sprintf(buf, "-%d.%06u\n", val, -val2);
522 return sprintf(buf, "%d.%06u\n", val, val2);
/*
 * iio_write_channel_info(): sysfs store for channel values. Parses a
 * decimal "[-]int[.fraction]" string by hand (micro_mult tracks fractional
 * digit weight starting at 100000) and forwards to the driver's
 * write_raw() callback.
 * __iio_build_postfix(): builds the full attribute-name postfix from the
 * chan spec - plain postfix for generic/unmodified channels, otherwise
 * inserts the modifier name and/or extend_name.
 * NOTE(review): many lines are missing from this listing (loop header,
 * negative handling, switch cases, kfree of intermediates, closing
 * braces). Do not build from this copy; restore from upstream.
 */
527 static ssize_t iio_write_channel_info(struct device *dev,
528 struct device_attribute *attr,
532 struct iio_dev *indio_dev = dev_get_drvdata(dev);
533 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
534 int ret, integer = 0, micro = 0, micro_mult = 100000;
535 bool integer_part = true, negative = false;
537 /* Assumes decimal - precision based on number of digits */
538 if (!indio_dev->write_raw)
545 if ('0' <= *buf && *buf <= '9') {
547 integer = integer*10 + *buf - '0';
549 micro += micro_mult*(*buf - '0');
554 } else if (*buf == '\n') {
555 if (*(buf + 1) == '\0')
559 } else if (*buf == '.') {
560 integer_part = false;
573 ret = indio_dev->write_raw(indio_dev, this_attr->c,
574 integer, micro, this_attr->address);
581 static int __iio_build_postfix(struct iio_chan_spec const *chan,
587 /* 3 options - generic, extend_name, modified - if generic, extend_name
588 * and modified cannot apply.*/
590 if (generic || (!chan->modified && !chan->extend_name)) {
591 all_post = kasprintf(GFP_KERNEL, "%s", postfix);
592 } else if (chan->modified) {
593 const char *intermediate;
594 switch (chan->type) {
597 = iio_modifier_names_light[chan->channel2];
606 = iio_modifier_names_axial[chan->channel2];
611 if (chan->extend_name)
612 all_post = kasprintf(GFP_KERNEL, "%s_%s_%s",
617 all_post = kasprintf(GFP_KERNEL, "%s_%s",
621 all_post = kasprintf(GFP_KERNEL, "%s_%s", chan->extend_name,
623 if (all_post == NULL)
/*
 * Channel-attribute machinery:
 *  - __iio_device_attr_init(): builds the sysfs attribute name from the
 *    chan spec (differential channels use the "in%d-in%d" complex format,
 *    indexed channels embed the channel number) and wires show/store.
 *  - __iio_device_attr_deinit(): frees the kasprintf'd name.
 *  - __iio_add_chan_devattr(): allocates an iio_dev_attr, initializes it,
 *    rejects duplicate names (-EBUSY path via dev_err "double register"),
 *    adds the file to the group and links it on attr_list.
 *  - iio_device_add_channel_sysfs(): creates "input" (processed) or "raw"
 *    plus one attribute per info_mask bit pair (shared/separate, hence
 *    i/2 and the i%2 == 0 -EBUSY tolerance for shared attrs).
 * NOTE(review): this listing has dropped many lines (parameter tails,
 * braces, error labels, kfree calls). Treat it as read-only reference and
 * restore the bodies from upstream before modifying.
 */
629 int __iio_device_attr_init(struct device_attribute *dev_attr,
631 struct iio_chan_spec const *chan,
632 ssize_t (*readfunc)(struct device *dev,
633 struct device_attribute *attr,
635 ssize_t (*writefunc)(struct device *dev,
636 struct device_attribute *attr,
642 char *name_format, *full_postfix;
643 sysfs_attr_init(&dev_attr->attr);
644 ret = __iio_build_postfix(chan, generic, postfix, &full_postfix);
648 /* Special case for types that uses both channel numbers in naming */
649 if (chan->type == IIO_IN_DIFF && !generic)
651 = kasprintf(GFP_KERNEL, "%s_%s",
652 iio_chan_type_name_spec_complex[chan->type],
654 else if (generic || !chan->indexed)
656 = kasprintf(GFP_KERNEL, "%s_%s",
657 iio_chan_type_name_spec_shared[chan->type],
661 = kasprintf(GFP_KERNEL, "%s%d_%s",
662 iio_chan_type_name_spec_shared[chan->type],
666 if (name_format == NULL) {
668 goto error_free_full_postfix;
670 dev_attr->attr.name = kasprintf(GFP_KERNEL,
674 if (dev_attr->attr.name == NULL) {
676 goto error_free_name_format;
680 dev_attr->attr.mode |= S_IRUGO;
681 dev_attr->show = readfunc;
685 dev_attr->attr.mode |= S_IWUSR;
686 dev_attr->store = writefunc;
693 error_free_name_format:
695 error_free_full_postfix:
701 void __iio_device_attr_deinit(struct device_attribute *dev_attr)
703 kfree(dev_attr->attr.name);
706 int __iio_add_chan_devattr(const char *postfix,
708 struct iio_chan_spec const *chan,
709 ssize_t (*readfunc)(struct device *dev,
710 struct device_attribute *attr,
712 ssize_t (*writefunc)(struct device *dev,
713 struct device_attribute *attr,
719 struct list_head *attr_list)
722 struct iio_dev_attr *iio_attr, *t;
724 iio_attr = kzalloc(sizeof *iio_attr, GFP_KERNEL);
725 if (iio_attr == NULL) {
729 ret = __iio_device_attr_init(&iio_attr->dev_attr,
731 readfunc, writefunc, generic);
733 goto error_iio_dev_attr_free;
735 iio_attr->address = mask;
736 list_for_each_entry(t, attr_list, l)
737 if (strcmp(t->dev_attr.attr.name,
738 iio_attr->dev_attr.attr.name) == 0) {
740 dev_err(dev, "tried to double register : %s\n",
741 t->dev_attr.attr.name);
743 goto error_device_attr_deinit;
746 ret = sysfs_add_file_to_group(&dev->kobj,
747 &iio_attr->dev_attr.attr, group);
749 goto error_device_attr_deinit;
751 list_add(&iio_attr->l, attr_list);
755 error_device_attr_deinit:
756 __iio_device_attr_deinit(&iio_attr->dev_attr);
757 error_iio_dev_attr_free:
763 static int iio_device_add_channel_sysfs(struct iio_dev *dev_info,
764 struct iio_chan_spec const *chan)
769 if (chan->channel < 0)
771 if (chan->processed_val)
772 ret = __iio_add_chan_devattr("input", NULL, chan,
773 &iio_read_channel_info,
778 &dev_info->channel_attr_list);
780 ret = __iio_add_chan_devattr("raw", NULL, chan,
781 &iio_read_channel_info,
786 &dev_info->channel_attr_list);
790 for_each_set_bit(i, &chan->info_mask, sizeof(long)*8) {
791 ret = __iio_add_chan_devattr(iio_chan_info_postfix[i/2],
793 &iio_read_channel_info,
794 &iio_write_channel_info,
798 &dev_info->channel_attr_list);
799 if (ret == -EBUSY && (i%2 == 0)) {
810 static void iio_device_remove_and_free_read_attr(struct iio_dev *dev_info,
811 struct iio_dev_attr *p)
813 sysfs_remove_file_from_group(&dev_info->dev.kobj,
814 &p->dev_attr.attr, NULL);
815 kfree(p->dev_attr.attr.name);
/*
 * Register the driver-supplied attribute group (if any) and then the
 * per-channel attributes generated from dev_info->channels. On failure
 * the error path walks channel_attr_list removing and freeing each
 * attribute before removing the static group.
 * NOTE(review): lines are missing from this listing (error checks, loop
 * bodies, labels, return statements) - restore from upstream before use.
 */
819 static int iio_device_register_sysfs(struct iio_dev *dev_info)
822 struct iio_dev_attr *p, *n;
824 if (dev_info->attrs) {
825 ret = sysfs_create_group(&dev_info->dev.kobj, dev_info->attrs);
827 dev_err(dev_info->dev.parent,
828 "Failed to register sysfs hooks\n");
834 * New channel registration method - relies on the fact a group does
835 * not need to be initialized if its name is NULL.
837 INIT_LIST_HEAD(&dev_info->channel_attr_list);
838 if (dev_info->channels)
839 for (i = 0; i < dev_info->num_channels; i++) {
840 ret = iio_device_add_channel_sysfs(dev_info,
844 goto error_clear_attrs;
849 list_for_each_entry_safe(p, n,
850 &dev_info->channel_attr_list, l) {
852 iio_device_remove_and_free_read_attr(dev_info, p);
855 sysfs_remove_group(&dev_info->dev.kobj, dev_info->attrs);
861 static void iio_device_unregister_sysfs(struct iio_dev *dev_info)
864 struct iio_dev_attr *p, *n;
865 list_for_each_entry_safe(p, n, &dev_info->channel_attr_list, l) {
867 iio_device_remove_and_free_read_attr(dev_info, p);
871 sysfs_remove_group(&dev_info->dev.kobj, dev_info->attrs);
874 /* Return a negative errno on failure */
875 int iio_get_new_ida_val(struct ida *this_ida)
881 if (unlikely(ida_pre_get(this_ida, GFP_KERNEL) == 0))
884 spin_lock(&iio_ida_lock);
885 ret = ida_get_new(this_ida, &val);
886 spin_unlock(&iio_ida_lock);
887 if (unlikely(ret == -EAGAIN))
889 else if (unlikely(ret))
894 EXPORT_SYMBOL(iio_get_new_ida_val);
896 void iio_free_ida_val(struct ida *this_ida, int id)
898 spin_lock(&iio_ida_lock);
899 ida_remove(this_ida, id);
900 spin_unlock(&iio_ida_lock);
902 EXPORT_SYMBOL(iio_free_ida_val);
904 static int iio_device_register_id(struct iio_dev *dev_info,
905 struct ida *this_ida)
907 dev_info->id = iio_get_new_ida_val(&iio_ida);
908 if (dev_info->id < 0)
913 static void iio_device_unregister_id(struct iio_dev *dev_info)
915 iio_free_ida_val(&iio_ida, dev_info->id);
918 static const char * const iio_ev_type_text[] = {
919 [IIO_EV_TYPE_THRESH] = "thresh",
920 [IIO_EV_TYPE_MAG] = "mag",
921 [IIO_EV_TYPE_ROC] = "roc"
924 static const char * const iio_ev_dir_text[] = {
925 [IIO_EV_DIR_EITHER] = "either",
926 [IIO_EV_DIR_RISING] = "rising",
927 [IIO_EV_DIR_FALLING] = "falling"
930 static ssize_t iio_ev_state_store(struct device *dev,
931 struct device_attribute *attr,
935 struct iio_dev *indio_dev = dev_get_drvdata(dev);
936 struct iio_event_attr *this_attr = to_iio_event_attr(attr);
939 ret = strict_strtoul(buf, 10, &val);
940 if (ret || val < 0 || val > 1)
943 ret = indio_dev->write_event_config(indio_dev, this_attr->mask,
946 return (ret < 0) ? ret : len;
949 static ssize_t iio_ev_state_show(struct device *dev,
950 struct device_attribute *attr,
953 struct iio_dev *indio_dev = dev_get_drvdata(dev);
954 struct iio_event_attr *this_attr = to_iio_event_attr(attr);
955 int val = indio_dev->read_event_config(indio_dev, this_attr->mask);
960 return sprintf(buf, "%d\n", val);
963 static ssize_t iio_ev_value_show(struct device *dev,
964 struct device_attribute *attr,
967 struct iio_dev *indio_dev = dev_get_drvdata(dev);
968 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
971 ret = indio_dev->read_event_value(indio_dev,
972 this_attr->address, &val);
976 return sprintf(buf, "%d\n", val);
979 static ssize_t iio_ev_value_store(struct device *dev,
980 struct device_attribute *attr,
984 struct iio_dev *indio_dev = dev_get_drvdata(dev);
985 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
989 ret = strict_strtoul(buf, 10, &val);
993 ret = indio_dev->write_event_value(indio_dev, this_attr->address,
/*
 * Event-attribute machinery:
 *  - __iio_add_chan_event_attr(): builds and registers one "<...>_en"
 *    state attribute (kasprintf'd name, iio_ev_state_show/store, listel
 *    from chan->shared_handler).
 *  - iio_device_add_event_sysfs(): for each bit in chan->event_mask,
 *    creates the "_en" and "_value" attributes and computes the event
 *    code via IIO_UNMOD/MOD_EVENT_CODE depending on channel type.
 *    NOTE(review): indexing uses i/IIO_EV_TYPE_MAX for the type name but
 *    i%IIO_EV_TYPE_MAX for the direction name - dividing and taking the
 *    modulus by the SAME constant looks wrong (direction normally uses
 *    IIO_EV_DIR_MAX); confirm against the enum layout.
 *  - __iio_remove_all_event_sysfs(): frees both the dev_attr_list and
 *    event_attr_list entries for one interface.
 *  - __iio_add_event_config_attrs()/__iio_remove_event_config_attrs():
 *    add/remove the driver-supplied event_conf_attrs plus the
 *    channel-generated ones, unwinding partially-added files on error.
 * NOTE(review): the listing is missing many lines throughout (loop
 * headers, braces, labels, kfree calls). Only interface [0] is used -
 * see the in-code HACK comment about the single-event-interface limit.
 * Restore the bodies from upstream before modifying.
 */
1001 static int __iio_add_chan_event_attr(const char *postfix,
1003 struct iio_chan_spec const *chan,
1006 struct list_head *attr_list)
1008 char *name_format, *full_postfix;
1010 struct iio_event_attr *iio_ev_attr;
1012 iio_ev_attr = kzalloc(sizeof *iio_ev_attr, GFP_KERNEL);
1013 if (iio_ev_attr == NULL) {
1018 sysfs_attr_init(&iio_ev_attr->dev_attr.attr);
1019 ret = __iio_build_postfix(chan, 0, postfix, &full_postfix);
1022 /* Special case for types that uses both channel numbers in naming */
1023 if (chan->type == IIO_IN_DIFF)
1025 = kasprintf(GFP_KERNEL, "%s_%s",
1026 iio_chan_type_name_spec_complex[chan->type],
1028 else if (!chan->indexed)
1030 = kasprintf(GFP_KERNEL, "%s_%s",
1031 iio_chan_type_name_spec_shared[chan->type],
1035 = kasprintf(GFP_KERNEL, "%s%d_%s",
1036 iio_chan_type_name_spec_shared[chan->type],
1039 if (name_format == NULL) {
1041 goto error_free_attr;
1044 iio_ev_attr->dev_attr.attr.name = kasprintf(GFP_KERNEL,
1048 if (iio_ev_attr->dev_attr.attr.name == NULL) {
1050 goto error_free_name_format;
1053 iio_ev_attr->dev_attr.attr.mode = S_IRUGO | S_IWUSR;
1054 iio_ev_attr->dev_attr.show = &iio_ev_state_show;
1055 iio_ev_attr->dev_attr.store = &iio_ev_state_store;
1056 iio_ev_attr->mask = mask;
1057 iio_ev_attr->listel = chan->shared_handler;
1058 ret = sysfs_add_file_to_group(&dev->kobj,
1059 &iio_ev_attr->dev_attr.attr,
1062 goto error_free_name;
1063 list_add(&iio_ev_attr->l, attr_list);
1068 kfree(iio_ev_attr->dev_attr.attr.name);
1069 error_free_name_format:
1078 static int iio_device_add_event_sysfs(struct iio_dev *dev_info,
1079 struct iio_chan_spec const *chan)
1082 int ret = 0, i, mask;
1084 if (!chan->event_mask)
1087 for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
1088 postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
1089 iio_ev_type_text[i/IIO_EV_TYPE_MAX],
1090 iio_ev_dir_text[i%IIO_EV_TYPE_MAX]);
1091 if (postfix == NULL) {
1095 switch (chan->type) {
1096 /* Switch this to a table at some point */
1098 mask = IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
1103 mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
1108 mask = IIO_MOD_EVENT_CODE(chan->type, chan->channel,
1114 printk(KERN_INFO "currently unhandled type of event\n");
1116 ret = __iio_add_chan_event_attr(postfix,
1120 /*HACK. - limits us to one
1121 event interface - fix by
1122 extending the bitmask - but
1124 &dev_info->event_interfaces[0]
1126 &dev_info->event_interfaces[0].
1132 postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
1133 iio_ev_type_text[i/IIO_EV_TYPE_MAX],
1134 iio_ev_dir_text[i%IIO_EV_TYPE_MAX]);
1135 if (postfix == NULL) {
1139 ret = __iio_add_chan_devattr(postfix, NULL, chan,
1144 &dev_info->event_interfaces[0]
1146 &dev_info->event_interfaces[0]
1158 static inline void __iio_remove_all_event_sysfs(struct iio_dev *dev_info,
1159 const char *groupname,
1162 struct iio_dev_attr *p, *n;
1163 struct iio_event_attr *q, *m;
1164 list_for_each_entry_safe(p, n,
1165 &dev_info->event_interfaces[num].
1167 sysfs_remove_file_from_group(&dev_info
1168 ->event_interfaces[num].dev.kobj,
1171 kfree(p->dev_attr.attr.name);
1174 list_for_each_entry_safe(q, m,
1175 &dev_info->event_interfaces[num].
1176 event_attr_list, l) {
1177 sysfs_remove_file_from_group(&dev_info
1178 ->event_interfaces[num].dev.kobj,
1181 kfree(q->dev_attr.attr.name);
1186 static inline int __iio_add_event_config_attrs(struct iio_dev *dev_info, int i)
1190 /*p for adding, q for removing */
1191 struct attribute **attrp, **attrq;
1193 if (dev_info->event_conf_attrs && dev_info->event_conf_attrs[i].attrs) {
1194 attrp = dev_info->event_conf_attrs[i].attrs;
1196 ret = sysfs_add_file_to_group(&dev_info
1197 ->event_interfaces[0]
1206 INIT_LIST_HEAD(&dev_info->event_interfaces[0].event_attr_list);
1207 INIT_LIST_HEAD(&dev_info->event_interfaces[0].dev_attr_list);
1208 /* Dynically created from the channels array */
1209 if (dev_info->channels) {
1210 for (j = 0; j < dev_info->num_channels; j++) {
1211 ret = iio_device_add_event_sysfs(dev_info,
1215 goto error_clear_attrs;
1221 __iio_remove_all_event_sysfs(dev_info,
1225 attrq = dev_info->event_conf_attrs[i].attrs;
1226 while (attrq != attrp) {
1227 sysfs_remove_file_from_group(&dev_info
1228 ->event_interfaces[0]
1238 static inline int __iio_remove_event_config_attrs(struct iio_dev *dev_info,
1241 struct attribute **attrq;
1242 __iio_remove_all_event_sysfs(dev_info,
1245 if (dev_info->event_conf_attrs
1246 && dev_info->event_conf_attrs[i].attrs) {
1247 attrq = dev_info->event_conf_attrs[i].attrs;
1249 sysfs_remove_file_from_group(&dev_info
1250 ->event_interfaces[0]
/*
 * Allocate the event interfaces and interrupt-descriptor arrays, set up
 * one chrdev event interface per interrupt line (iio_setup_ev_int), add
 * the driver's static event attribute groups, then the per-channel event
 * config attributes. The error labels unwind in reverse order.
 * NOTE(review): lines are missing from this listing (declarations,
 * name-format arguments, closing braces, the final return).
 * NOTE(review): in the error_unregister_config_attrs loop below, the call
 * passes 'i' where it almost certainly should pass the loop variable 'j' -
 * as written it removes the same (failed) index repeatedly instead of the
 * successfully-added ones. Fix when restoring from upstream.
 */
1261 static int iio_device_register_eventset(struct iio_dev *dev_info)
1265 if (dev_info->num_interrupt_lines == 0)
1268 dev_info->event_interfaces =
1269 kzalloc(sizeof(struct iio_event_interface)
1270 *dev_info->num_interrupt_lines,
1272 if (dev_info->event_interfaces == NULL) {
1277 dev_info->interrupts = kzalloc(sizeof(struct iio_interrupt *)
1278 *dev_info->num_interrupt_lines,
1280 if (dev_info->interrupts == NULL) {
1282 goto error_free_event_interfaces;
1285 for (i = 0; i < dev_info->num_interrupt_lines; i++) {
1286 dev_info->event_interfaces[i].owner = dev_info->driver_module;
1288 snprintf(dev_info->event_interfaces[i]._name, 20,
1290 dev_name(&dev_info->dev),
1293 ret = iio_setup_ev_int(&dev_info->event_interfaces[i],
1294 (const char *)(dev_info
1295 ->event_interfaces[i]
1297 dev_info->driver_module,
1300 dev_err(&dev_info->dev,
1301 "Could not get chrdev interface\n");
1302 goto error_free_setup_ev_ints;
1305 dev_set_drvdata(&dev_info->event_interfaces[i].dev,
1308 if (dev_info->event_attrs != NULL)
1309 ret = sysfs_create_group(&dev_info
1310 ->event_interfaces[i]
1312 &dev_info->event_attrs[i]);
1315 dev_err(&dev_info->dev,
1316 "Failed to register sysfs for event attrs");
1317 goto error_remove_sysfs_interfaces;
1321 for (i = 0; i < dev_info->num_interrupt_lines; i++) {
1322 ret = __iio_add_event_config_attrs(dev_info, i);
1324 goto error_unregister_config_attrs;
1329 error_unregister_config_attrs:
1330 for (j = 0; j < i; j++)
/* NOTE(review): 'i' below should almost certainly be 'j'. */
1331 __iio_remove_event_config_attrs(dev_info, i);
1332 i = dev_info->num_interrupt_lines - 1;
1333 error_remove_sysfs_interfaces:
1334 for (j = 0; j < i; j++)
1335 if (dev_info->event_attrs != NULL)
1336 sysfs_remove_group(&dev_info
1337 ->event_interfaces[j].dev.kobj,
1338 &dev_info->event_attrs[j]);
1339 error_free_setup_ev_ints:
1340 for (j = 0; j < i; j++)
1341 iio_free_ev_int(&dev_info->event_interfaces[j]);
1342 kfree(dev_info->interrupts);
1343 error_free_event_interfaces:
1344 kfree(dev_info->event_interfaces);
1350 static void iio_device_unregister_eventset(struct iio_dev *dev_info)
1354 if (dev_info->num_interrupt_lines == 0)
1356 for (i = 0; i < dev_info->num_interrupt_lines; i++) {
1357 __iio_remove_event_config_attrs(dev_info, i);
1358 if (dev_info->event_attrs != NULL)
1359 sysfs_remove_group(&dev_info
1360 ->event_interfaces[i].dev.kobj,
1361 &dev_info->event_attrs[i]);
1364 for (i = 0; i < dev_info->num_interrupt_lines; i++)
1365 iio_free_ev_int(&dev_info->event_interfaces[i]);
1366 kfree(dev_info->interrupts);
1367 kfree(dev_info->event_interfaces);
/* Release the iio_dev: drop the core module reference and free it. */
static void iio_dev_release(struct device *device)
{
	struct iio_dev *dev = to_iio_dev(device);
	iio_put();
	kfree(dev);
}
1378 static struct device_type iio_dev_type = {
1379 .name = "iio_device",
1380 .release = iio_dev_release,
1383 struct iio_dev *iio_allocate_device(int sizeof_priv)
1385 struct iio_dev *dev;
1388 alloc_size = sizeof(struct iio_dev);
1390 alloc_size = ALIGN(alloc_size, IIO_ALIGN);
1391 alloc_size += sizeof_priv;
1393 /* ensure 32-byte alignment of whole construct ? */
1394 alloc_size += IIO_ALIGN - 1;
1396 dev = kzalloc(alloc_size, GFP_KERNEL);
1399 dev->dev.type = &iio_dev_type;
1400 dev->dev.bus = &iio_bus_type;
1401 device_initialize(&dev->dev);
1402 dev_set_drvdata(&dev->dev, (void *)dev);
1403 mutex_init(&dev->mlock);
1409 EXPORT_SYMBOL(iio_allocate_device);
/* Free a device allocated with iio_allocate_device(); NULL is a no-op. */
void iio_free_device(struct iio_dev *dev)
{
	if (dev)
		iio_put_device(dev);
}
EXPORT_SYMBOL(iio_free_device);
1418 int iio_device_register(struct iio_dev *dev_info)
1422 ret = iio_device_register_id(dev_info, &iio_ida);
1424 dev_err(&dev_info->dev, "Failed to get id\n");
1427 dev_set_name(&dev_info->dev, "device%d", dev_info->id);
1429 ret = device_add(&dev_info->dev);
1431 goto error_free_ida;
1432 ret = iio_device_register_sysfs(dev_info);
1434 dev_err(dev_info->dev.parent,
1435 "Failed to register sysfs interfaces\n");
1436 goto error_del_device;
1438 ret = iio_device_register_eventset(dev_info);
1440 dev_err(dev_info->dev.parent,
1441 "Failed to register event set\n");
1442 goto error_free_sysfs;
1444 if (dev_info->modes & INDIO_RING_TRIGGERED)
1445 iio_device_register_trigger_consumer(dev_info);
1450 iio_device_unregister_sysfs(dev_info);
1452 device_del(&dev_info->dev);
1454 iio_device_unregister_id(dev_info);
1458 EXPORT_SYMBOL(iio_device_register);
1460 void iio_device_unregister(struct iio_dev *dev_info)
1462 if (dev_info->modes & INDIO_RING_TRIGGERED)
1463 iio_device_unregister_trigger_consumer(dev_info);
1464 iio_device_unregister_eventset(dev_info);
1465 iio_device_unregister_sysfs(dev_info);
1466 iio_device_unregister_id(dev_info);
1467 device_unregister(&dev_info->dev);
1469 EXPORT_SYMBOL(iio_device_unregister);
1473 module_put(THIS_MODULE);
1478 __module_get(THIS_MODULE);
1481 subsys_initcall(iio_init);
1482 module_exit(iio_exit);
1484 MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
1485 MODULE_DESCRIPTION("Industrial I/O core");
1486 MODULE_LICENSE("GPL");