Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
[pandora-kernel.git] / drivers / iio / buffer_cb.c
1 #include <linux/kernel.h>
2 #include <linux/slab.h>
3 #include <linux/err.h>
4 #include <linux/export.h>
5 #include <linux/iio/buffer.h>
6 #include <linux/iio/consumer.h>
7
/*
 * An IIO buffer that hands every stored sample to a consumer-supplied
 * callback instead of exposing it through a chardev.
 */
struct iio_cb_buffer {
        struct iio_buffer buffer;       /* embedded buffer registered with the IIO core */
        int (*cb)(const void *data, void *private);     /* consumer callback, invoked per stored sample */
        void *private;                  /* opaque context passed back to @cb */
        struct iio_channel *channels;   /* channel map from iio_channel_get_all();
                                         * terminated by an entry with NULL indio_dev */
};
14
/* Map an embedded iio_buffer back to its enclosing iio_cb_buffer. */
static struct iio_cb_buffer *buffer_to_cb_buffer(struct iio_buffer *buffer)
{
        return container_of(buffer, struct iio_cb_buffer, buffer);
}
19
20 static int iio_buffer_cb_store_to(struct iio_buffer *buffer, const void *data)
21 {
22         struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);
23         return cb_buff->cb(data, cb_buff->private);
24 }
25
26 static void iio_buffer_cb_release(struct iio_buffer *buffer)
27 {
28         struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);
29         kfree(cb_buff->buffer.scan_mask);
30         kfree(cb_buff);
31 }
32
/* Buffer ops: only the push path (store_to) and teardown (release) are needed. */
static const struct iio_buffer_access_funcs iio_cb_access = {
        .store_to = &iio_buffer_cb_store_to,
        .release = &iio_buffer_cb_release,
};
37
38 struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
39                                              int (*cb)(const void *data,
40                                                        void *private),
41                                              void *private)
42 {
43         int ret;
44         struct iio_cb_buffer *cb_buff;
45         struct iio_dev *indio_dev;
46         struct iio_channel *chan;
47
48         cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
49         if (cb_buff == NULL) {
50                 ret = -ENOMEM;
51                 goto error_ret;
52         }
53
54         iio_buffer_init(&cb_buff->buffer);
55
56         cb_buff->private = private;
57         cb_buff->cb = cb;
58         cb_buff->buffer.access = &iio_cb_access;
59         INIT_LIST_HEAD(&cb_buff->buffer.demux_list);
60
61         cb_buff->channels = iio_channel_get_all(dev);
62         if (IS_ERR(cb_buff->channels)) {
63                 ret = PTR_ERR(cb_buff->channels);
64                 goto error_free_cb_buff;
65         }
66
67         indio_dev = cb_buff->channels[0].indio_dev;
68         cb_buff->buffer.scan_mask
69                 = kcalloc(BITS_TO_LONGS(indio_dev->masklength), sizeof(long),
70                           GFP_KERNEL);
71         if (cb_buff->buffer.scan_mask == NULL) {
72                 ret = -ENOMEM;
73                 goto error_release_channels;
74         }
75         chan = &cb_buff->channels[0];
76         while (chan->indio_dev) {
77                 if (chan->indio_dev != indio_dev) {
78                         ret = -EINVAL;
79                         goto error_free_scan_mask;
80                 }
81                 set_bit(chan->channel->scan_index,
82                         cb_buff->buffer.scan_mask);
83                 chan++;
84         }
85
86         return cb_buff;
87
88 error_free_scan_mask:
89         kfree(cb_buff->buffer.scan_mask);
90 error_release_channels:
91         iio_channel_release_all(cb_buff->channels);
92 error_free_cb_buff:
93         kfree(cb_buff);
94 error_ret:
95         return ERR_PTR(ret);
96 }
97 EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);
98
99 int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
100 {
101         return iio_update_buffers(cb_buff->channels[0].indio_dev,
102                                   &cb_buff->buffer,
103                                   NULL);
104 }
105 EXPORT_SYMBOL_GPL(iio_channel_start_all_cb);
106
107 void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff)
108 {
109         iio_update_buffers(cb_buff->channels[0].indio_dev,
110                            NULL,
111                            &cb_buff->buffer);
112 }
113 EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);
114
115 void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
116 {
117         iio_channel_release_all(cb_buff->channels);
118         iio_buffer_put(&cb_buff->buffer);
119 }
120 EXPORT_SYMBOL_GPL(iio_channel_release_all_cb);
121
122 struct iio_channel
123 *iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer)
124 {
125         return cb_buffer->channels;
126 }
127 EXPORT_SYMBOL_GPL(iio_channel_cb_get_channels);