/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include "ring_sw.h"

static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
						int bytes_per_datum, int length)
{
	if ((length == 0) || (bytes_per_datum == 0))
		return -EINVAL;

	__iio_update_ring_buffer(&ring->buf, bytes_per_datum, length);
	ring->data = kmalloc(length*ring->buf.bpd, GFP_KERNEL);
	ring->read_p = 0;
	ring->write_p = 0;
	ring->last_written_p = 0;
	ring->half_p = 0;
	return ring->data ? 0 : -ENOMEM;
}

static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	spin_lock_init(&ring->use_lock);
}

static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	kfree(ring->data);
}

void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count++;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_mark_sw_rb_in_use);

void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count--;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_unmark_sw_rb_in_use);

/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt handler
 * in the device driver */
/* Lock always held if there is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
			 unsigned char *data,
			 s64 timestamp)
{
	int ret = 0;
	int code;
	unsigned char *temp_ptr, *change_test_ptr;

	/* Set up the write pointer on the very first store */
	if (unlikely(ring->write_p == 0)) {
		ring->write_p = ring->data;
		/* Doesn't actually matter if this is out of the set
		 * as long as the read pointer is valid before this
		 * passes it - guaranteed as set later in this function.
		 */
		ring->half_p = ring->data - ring->buf.length*ring->buf.bpd/2;
	}
	/* Copy data to wherever the current write pointer says */
	memcpy(ring->write_p, data, ring->buf.bpd);
	barrier();
	/* Update the pointer used to get the most recent value.
	 * Always valid as it either points to the latest or the second
	 * latest value. Before this runs it is null and read attempts
	 * fail with -EAGAIN.
	 */
	ring->last_written_p = ring->write_p;
	barrier();
	/* temp_ptr used to ensure we never have an invalid pointer
	 * it may be slightly lagging, but never invalid
	 */
	temp_ptr = ring->write_p + ring->buf.bpd;
	/* End of ring, back to the beginning */
	if (temp_ptr == ring->data + ring->buf.length*ring->buf.bpd)
		temp_ptr = ring->data;
	/* Update the write pointer
	 * always valid as long as this is the only function able to write.
	 * Care needed with smp systems to ensure more than one ring fill
	 * is never scheduled.
	 */
	ring->write_p = temp_ptr;

	if (ring->read_p == 0)
		ring->read_p = ring->data;
	/* Buffer full - move the read pointer and create / escalate
	 * ring event */
	/* Tricky case - if the read pointer moves before we adjust it.
	 * Handle by not pushing if it has moved - may result in occasional
	 * unnecessary buffer full events when it wasn't quite true.
	 */
	else if (ring->write_p == ring->read_p) {
		change_test_ptr = ring->read_p;
		temp_ptr = change_test_ptr + ring->buf.bpd;
		if (temp_ptr
		    == ring->data + ring->buf.length*ring->buf.bpd) {
			temp_ptr = ring->data;
		}
		/* We are moving the pointer on by one because the ring is
		 * full. Any change to the read pointer will be this or
		 * greater.
		 */
		if (change_test_ptr == ring->read_p)
			ring->read_p = temp_ptr;

		spin_lock(&ring->buf.shared_ev_pointer.lock);
		ret = iio_push_or_escallate_ring_event(&ring->buf,
						       IIO_EVENT_CODE_RING_100_FULL,
						       timestamp);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
		if (ret)
			goto error_ret;
	}
	/* investigate if our event barrier has been passed */
	/* There are definite 'issues' with this and chances of
	 * simultaneous read */
	/* Also need to use loop count to ensure this only happens once */
	ring->half_p += ring->buf.bpd;
	if (ring->half_p == ring->data + ring->buf.length*ring->buf.bpd)
		ring->half_p = ring->data;
	if (ring->half_p == ring->read_p) {
		spin_lock(&ring->buf.shared_ev_pointer.lock);
		code = IIO_EVENT_CODE_RING_50_FULL;
		ret = __iio_push_event(&ring->buf.ev_int,
				       code,
				       timestamp,
				       &ring->buf.shared_ev_pointer);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
	}
error_ret:
	return ret;
}

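/* Illustrative sketch, not part of this driver: a typical caller is the
 * bottom half of a device driver's data ready handler, pushing one scan
 * through the ring's access functions. struct my_state, my_read_scan()
 * and MY_SCAN_BYTES are hypothetical names.
 *
 *	static void my_poll_bh(struct work_struct *work_s)
 *	{
 *		struct my_state *st = container_of(work_s,
 *						   struct my_state, work);
 *		u8 scan[MY_SCAN_BYTES];
 *		s64 ts = iio_get_time_ns();
 *
 *		my_read_scan(st, scan);
 *		st->indio_dev->ring->access.store_to(st->indio_dev->ring,
 *						     scan, ts);
 *	}
 *
 * Only one such fill may run per ring at a time - as noted above, the
 * driver is responsible for enforcing that.
 */
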
int iio_rip_sw_rb(struct iio_ring_buffer *r,
		  size_t count, u8 **data, int *dead_offset)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
	int ret, max_copied;
	int bytes_to_rip;

	/* A userspace program has probably made an error if it tries to
	 * read something that is not a whole number of bpds.
	 * Return an error.
	 */
	if (count % ring->buf.bpd) {
		ret = -EINVAL;
		printk(KERN_INFO "Ring buffer read request not whole number of"
		       " samples: Request bytes %zd, Current bpd %d\n",
		       count, ring->buf.bpd);
		goto error_ret;
	}
	/* Limit size to whole of ring buffer */
	bytes_to_rip = min((size_t)(ring->buf.bpd*ring->buf.length), count);

	*data = kmalloc(bytes_to_rip, GFP_KERNEL);
	if (*data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* build local copy */
	initial_read_p = ring->read_p;
	if (unlikely(initial_read_p == 0)) { /* No data here as yet */
		ret = 0;
		goto error_free_data_cpy;
	}

	initial_write_p = ring->write_p;

	/* Need a consistent pair */
	while ((initial_read_p != ring->read_p)
	       || (initial_write_p != ring->write_p)) {
		initial_read_p = ring->read_p;
		initial_write_p = ring->write_p;
	}
	if (initial_write_p == initial_read_p) {
		/* No new data available. */
		ret = 0;
		goto error_free_data_cpy;
	}

	if (initial_write_p >= initial_read_p + bytes_to_rip) {
		/* write_p is greater than necessary, all is easy */
		max_copied = bytes_to_rip;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_read_p + max_copied;
	} else if (initial_write_p > initial_read_p) {
		/* not enough data to copy */
		max_copied = initial_write_p - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_write_p;
	} else {
		/* going through 'end' of ring buffer */
		max_copied = ring->data
			+ ring->buf.length*ring->buf.bpd - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		/* possible we are done if we align precisely with end */
		if (max_copied == bytes_to_rip)
			end_read_p = ring->data;
		else if (initial_write_p
			 > ring->data + bytes_to_rip - max_copied) {
			/* enough data to finish */
			memcpy(*data + max_copied, ring->data,
			       bytes_to_rip - max_copied);
			/* compute end point before max_copied moves on */
			end_read_p = ring->data + (bytes_to_rip - max_copied);
			max_copied = bytes_to_rip;
		} else { /* not enough data */
			memcpy(*data + max_copied, ring->data,
			       initial_write_p - ring->data);
			max_copied += initial_write_p - ring->data;
			end_read_p = initial_write_p;
		}
	}
	/* Now to verify which section was cleanly copied - i.e. how far
	 * the read pointer has been pushed */
	current_read_p = ring->read_p;

	if (initial_read_p <= current_read_p)
		*dead_offset = current_read_p - initial_read_p;
	else
		*dead_offset = ring->buf.length*ring->buf.bpd
			- (initial_read_p - current_read_p);

	/* possible issue if the initial write has been lapped or indeed
	 * the point we were reading to has been passed */
	/* No valid data read.
	 * In this case the read pointer is already correct having been
	 * pushed further than we would look. */
	if (max_copied - *dead_offset < 0) {
		ret = 0;
		goto error_free_data_cpy;
	}

	/* setup the next read position */
	/* Beware, this may fail due to concurrency fun and games.
	 * It is possible that sufficient fill commands have run to push the
	 * read pointer past where we would be after the rip. If this occurs,
	 * leave it be.
	 */
	/* Tricky - deal with loops */
	while (ring->read_p != end_read_p)
		ring->read_p = end_read_p;

	return max_copied - *dead_offset;

error_free_data_cpy:
	kfree(*data);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_rip_sw_rb);

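/* Illustrative sketch, not part of this driver: the chrdev read path is
 * expected to call this via the access functions (rip_lots here) and to
 * skip over any 'dead' region the writer overran while we copied. On a
 * positive return the caller owns and frees *data; on 0 or error the
 * buffer has already been freed above.
 *
 *	u8 *data;
 *	int dead_offset;
 *	int copied = r->access.rip_lots(r, count, &data, &dead_offset);
 *
 *	if (copied > 0) {
 *		if (copy_to_user(buf, data + dead_offset, copied))
 *			copied = -EFAULT;
 *		kfree(data);
 *	}
 */
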
int iio_store_to_sw_rb(struct iio_ring_buffer *r, u8 *data, s64 timestamp)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return iio_store_to_sw_ring(ring, data, timestamp);
}
EXPORT_SYMBOL(iio_store_to_sw_rb);

int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
			       unsigned char *data)
{
	unsigned char *last_written_p_copy;

	iio_mark_sw_rb_in_use(&ring->buf);
again:
	barrier();
	last_written_p_copy = ring->last_written_p;
	barrier(); /* unnecessary? */
	/* Check there is anything here */
	if (last_written_p_copy == 0) {
		/* Nothing written yet; drop our use marking before bailing */
		iio_unmark_sw_rb_in_use(&ring->buf);
		return -EAGAIN;
	}
	memcpy(data, last_written_p_copy, ring->buf.bpd);

	/* Writer moved on mid copy - our snapshot may be torn, try again */
	if (unlikely(ring->last_written_p != last_written_p_copy))
		goto again;

	iio_unmark_sw_rb_in_use(&ring->buf);
	return 0;
}

int iio_read_last_from_sw_rb(struct iio_ring_buffer *r,
			     unsigned char *data)
{
	return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}
EXPORT_SYMBOL(iio_read_last_from_sw_rb);

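/* Illustrative sketch, not part of this driver: grabbing the most recent
 * sample via the access functions, e.g. for an 'instantaneous value'
 * style read. MY_MAX_BPD is a hypothetical bound on bytes per datum.
 *
 *	u8 last[MY_MAX_BPD];
 *	int ret = r->access.read_last(r, last);
 *
 *	if (ret == -EAGAIN)
 *		return ret;	// nothing written to the ring yet
 */
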
int iio_request_update_sw_rb(struct iio_ring_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	spin_lock(&ring->use_lock);
	if (!ring->update_needed)
		goto error_ret;
	if (ring->use_count) {
		ret = -EAGAIN;
		goto error_ret;
	}
	__iio_free_sw_ring_buffer(ring);
	ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bpd,
					    ring->buf.length);
error_ret:
	spin_unlock(&ring->use_lock);
	return ret;
}
EXPORT_SYMBOL(iio_request_update_sw_rb);

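/* Illustrative sketch, not part of this driver: resizing follows a
 * mark-then-update pattern. set_length() only flags the change via
 * mark_param_change(); the ring is actually reallocated when
 * request_update() runs with no users holding the ring in use.
 *
 *	r->access.set_length(r, 128);
 *	ret = r->access.request_update(r);
 *	if (ret == -EAGAIN)
 *		return ret;	// ring still marked in use; retry later
 */
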
int iio_get_bpd_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return ring->buf.bpd;
}
EXPORT_SYMBOL(iio_get_bpd_sw_rb);

int iio_set_bpd_sw_rb(struct iio_ring_buffer *r, size_t bpd)
{
	if (r->bpd != bpd) {
		r->bpd = bpd;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_bpd_sw_rb);

int iio_get_length_sw_rb(struct iio_ring_buffer *r)
{
	return r->length;
}
EXPORT_SYMBOL(iio_get_length_sw_rb);

int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length)
{
	if (r->length != length) {
		r->length = length;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_length_sw_rb);

int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	ring->update_needed = true;
	return 0;
}
EXPORT_SYMBOL(iio_mark_update_needed_sw_rb);

static void iio_sw_rb_release(struct device *dev)
{
	struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
	kfree(iio_to_sw_ring(r));
}

static IIO_RING_ENABLE_ATTR;
static IIO_RING_BPS_ATTR;
static IIO_RING_LENGTH_ATTR;

/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_bps.attr,
	&dev_attr_ring_enable.attr,
	NULL,
};

static struct attribute_group iio_ring_attribute_group = {
	.attrs = iio_ring_attributes,
};

static const struct attribute_group *iio_ring_attribute_groups[] = {
	&iio_ring_attribute_group,
	NULL
};

static struct device_type iio_sw_ring_type = {
	.release = iio_sw_rb_release,
	.groups = iio_ring_attribute_groups,
};

struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *buf;
	struct iio_sw_ring_buffer *ring;

	ring = kzalloc(sizeof *ring, GFP_KERNEL);
	if (!ring)
		return NULL;
	buf = &ring->buf;
	iio_ring_buffer_init(buf, indio_dev);
	__iio_init_sw_ring_buffer(ring);
	buf->dev.type = &iio_sw_ring_type;
	device_initialize(&buf->dev);
	buf->dev.parent = &indio_dev->dev;
	buf->dev.class = &iio_class;
	dev_set_drvdata(&buf->dev, (void *)buf);

	return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);

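/* Illustrative sketch, not part of this driver: typical lifecycle from a
 * device driver's ring setup, pairing the allocation with iio_sw_rb_free()
 * on teardown or on any later error path.
 *
 *	indio_dev->ring = iio_sw_rb_allocate(indio_dev);
 *	if (!indio_dev->ring)
 *		return -ENOMEM;
 *	// ... set bpd and length, register the ring, capture data ...
 *	iio_sw_rb_free(indio_dev->ring);	// drops the device reference
 */
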
void iio_sw_rb_free(struct iio_ring_buffer *r)
{
	if (r)
		iio_put_ring_buffer(r);
}
EXPORT_SYMBOL(iio_sw_rb_free);

MODULE_DESCRIPTION("Industrial I/O software ring buffer");
MODULE_LICENSE("GPL");