/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include "ring_sw.h"
static inline int __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
					    int bytes_per_datum, int length)
{
	if ((length == 0) || (bytes_per_datum == 0))
		return -EINVAL;

	__iio_init_ring_buffer(&ring->buf, bytes_per_datum, length);
	spin_lock_init(&ring->use_lock);
	ring->data = kmalloc(length*ring->buf.bpd, GFP_KERNEL);
	ring->read_p = 0;
	ring->write_p = 0;
	ring->last_written_p = 0;
	ring->half_p = 0;
	return ring->data ? 0 : -ENOMEM;
}
static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	kfree(ring->data);
}

void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count++;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_mark_sw_rb_in_use);

void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count--;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_unmark_sw_rb_in_use);
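/* Usage note (editor's sketch, not from the original source): consumers that
 * need the ring to stay allocated across an access bracket it with the two
 * accessors above, as iio_read_last_from_sw_ring does later in this file:
 *
 *	iio_mark_sw_rb_in_use(r);
 *	... read from the ring ...
 *	iio_unmark_sw_rb_in_use(r);
 *
 * While use_count is non-zero, iio_request_update_sw_rb refuses to
 * reallocate the buffer and returns -EAGAIN.
 */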
/* Ring buffer related functionality */

/* Store to ring is typically called in the bh of a data ready interrupt
 * handler in the device driver */
/* Lock always held if there is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
			 unsigned char *data,
			 s64 timestamp)
{
	int ret = 0;
	int code;
	unsigned char *temp_ptr, *change_test_ptr;

	/* initial store */
	if (unlikely(ring->write_p == 0)) {
		ring->write_p = ring->data;
		/* Doesn't actually matter if this is out of the set
		 * as long as the read pointer is valid before this
		 * passes it - guaranteed as set later in this function.
		 */
		ring->half_p = ring->data - ring->buf.length*ring->buf.bpd/2;
	}
	/* Copy data to wherever the current write pointer says */
	memcpy(ring->write_p, data, ring->buf.bpd);
	/* Update the pointer used to get the most recent value.
	 * Always valid as it points to either the latest or the second
	 * latest value.  Before this runs it is null and read attempts
	 * fail with -EAGAIN.
	 */
	ring->last_written_p = ring->write_p;

	/* temp_ptr is used to ensure we never have an invalid pointer:
	 * it may be slightly lagging, but is never invalid.
	 */
	temp_ptr = ring->write_p + ring->buf.bpd;
	/* End of ring, back to the beginning */
	if (temp_ptr == ring->data + ring->buf.length*ring->buf.bpd)
		temp_ptr = ring->data;
	/* Update the write pointer.
	 * Always valid as long as this is the only function able to write.
	 * Care needed with smp systems to ensure more than one ring fill
	 * is never scheduled concurrently.
	 */
	ring->write_p = temp_ptr;
	if (ring->read_p == 0)
		ring->read_p = ring->data;
	/* Buffer full - move the read pointer and create / escalate
	 * ring event */
	/* Tricky case - if the read pointer moves before we adjust it.
	 * Handle by not pushing if it has moved - may result in occasional
	 * unnecessary buffer full events when it wasn't quite true.
	 */
	else if (ring->write_p == ring->read_p) {
		change_test_ptr = ring->read_p;
		temp_ptr = change_test_ptr + ring->buf.bpd;
		if (temp_ptr
		    == ring->data + ring->buf.length*ring->buf.bpd)
			temp_ptr = ring->data;
		/* We are moving the pointer on by one because the ring is
		 * full.  Any change to the read pointer will be this or
		 * greater.
		 */
		if (change_test_ptr == ring->read_p)
			ring->read_p = temp_ptr;

		spin_lock(&ring->buf.shared_ev_pointer.lock);
		ret = iio_push_or_escallate_ring_event(&ring->buf,
						       IIO_EVENT_CODE_RING_100_FULL,
						       timestamp);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
		if (ret)
			goto error_ret;
	}
	/* investigate if our event barrier has been passed */
	/* There are definite 'issues' with this and chances of
	 * simultaneous read */
	/* Also need to use loop count to ensure this only happens once */
	ring->half_p += ring->buf.bpd;
	if (ring->half_p == ring->data + ring->buf.length*ring->buf.bpd)
		ring->half_p = ring->data;
	if (ring->half_p == ring->read_p) {
		spin_lock(&ring->buf.shared_ev_pointer.lock);
		code = IIO_EVENT_CODE_RING_50_FULL;
		ret = __iio_push_event(&ring->buf.ev_int,
				       code,
				       timestamp,
				       &ring->buf.shared_ev_pointer);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
	}
error_ret:
	return ret;
}
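/* Usage sketch (editor's illustration, not from the original source): a
 * driver's data ready bottom half would typically push one scan into the
 * ring via the iio_store_to_sw_rb wrapper below.  struct my_state,
 * my_grab_scan and MY_BPD are hypothetical driver-side names:
 *
 *	static void my_data_rdy_bh(struct work_struct *work)
 *	{
 *		struct my_state *st = container_of(work, struct my_state,
 *						   work);
 *		u8 datum[MY_BPD];
 *
 *		my_grab_scan(st, datum);
 *		iio_store_to_sw_rb(st->indio_dev->ring, datum,
 *				   st->last_timestamp);
 *	}
 *
 * Per the comments above, the driver must guarantee that only one such
 * fill runs against a given ring at any time.
 */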
int iio_rip_sw_rb(struct iio_ring_buffer *r,
		  size_t count, u8 **data, int *dead_offset)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
	int ret, max_copied;
	int bytes_to_rip;

	/* A userspace program has probably made an error if it tries to
	 * read something that is not a whole number of bpds.
	 * Return an error.
	 */
	if (count % ring->buf.bpd) {
		ret = -EINVAL;
		printk(KERN_INFO "Ring buffer read request not whole number of"
		       " samples: Request bytes %zd, Current bpd %d\n",
		       count, ring->buf.bpd);
		goto error_ret;
	}
	/* Limit size to whole of ring buffer */
	bytes_to_rip = min((size_t)(ring->buf.bpd*ring->buf.length), count);

	*data = kmalloc(bytes_to_rip, GFP_KERNEL);
	if (*data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* build local copy */
	initial_read_p = ring->read_p;
	if (unlikely(initial_read_p == 0)) { /* No data here as yet */
		ret = 0;
		goto error_free_data_cpy;
	}
	initial_write_p = ring->write_p;

	/* Need a consistent pair */
	while ((initial_read_p != ring->read_p)
	       || (initial_write_p != ring->write_p)) {
		initial_read_p = ring->read_p;
		initial_write_p = ring->write_p;
	}
	if (initial_write_p == initial_read_p) {
		/* No new data available. */
		ret = 0;
		goto error_free_data_cpy;
	}
	if (initial_write_p >= initial_read_p + bytes_to_rip) {
		/* write_p is greater than necessary, all is easy */
		max_copied = bytes_to_rip;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_read_p + max_copied;
	} else if (initial_write_p > initial_read_p) {
		/* not enough data to copy */
		max_copied = initial_write_p - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_write_p;
	} else {
		/* going through 'end' of ring buffer */
		max_copied = ring->data
			+ ring->buf.length*ring->buf.bpd - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		/* possible we are done if we align precisely with end */
		if (max_copied == bytes_to_rip)
			end_read_p = ring->data;
		else if (initial_write_p
			 > ring->data + bytes_to_rip - max_copied) {
			/* enough data to finish */
			memcpy(*data + max_copied, ring->data,
			       bytes_to_rip - max_copied);
			end_read_p = ring->data + (bytes_to_rip - max_copied);
			max_copied = bytes_to_rip;
		} else { /* not enough data */
			memcpy(*data + max_copied, ring->data,
			       initial_write_p - ring->data);
			max_copied += initial_write_p - ring->data;
			end_read_p = initial_write_p;
		}
	}
	/* Now to verify which section was cleanly copied - i.e. how far
	 * the read pointer has been pushed */
	current_read_p = ring->read_p;

	if (initial_read_p <= current_read_p)
		*dead_offset = current_read_p - initial_read_p;
	else
		*dead_offset = ring->buf.length*ring->buf.bpd
			- (initial_read_p - current_read_p);

	/* possible issue if the initial write has been lapped or indeed
	 * the point we were reading to has been passed */
	/* No valid data read.
	 * In this case the read pointer is already correct having been
	 * pushed further than we would look. */
	if (max_copied - *dead_offset < 0) {
		ret = 0;
		goto error_free_data_cpy;
	}
	/* setup the next read position */
	/* Beware, this may fail due to concurrency fun and games.
	 * It is possible that sufficient fill commands have run to push the
	 * read pointer past where we would be after the rip.  If this
	 * occurs, leave it be.
	 */
	/* Tricky - deal with loops */
	while (ring->read_p != end_read_p)
		ring->read_p = end_read_p;

	return max_copied - *dead_offset;

error_free_data_cpy:
	kfree(*data);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_rip_sw_rb);
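/* Usage sketch (editor's illustration, not from the original source): a
 * chrdev read path consumes the rip result as below.  The return value is
 * max_copied - dead_offset, i.e. only the region the writer did not overtake
 * while we copied, starting at data + dead_offset:
 *
 *	u8 *data;
 *	int dead_offset;
 *	int copied = iio_rip_sw_rb(r, count, &data, &dead_offset);
 *
 *	if (copied > 0) {
 *		if (copy_to_user(buf, data + dead_offset, copied))
 *			copied = -EFAULT;
 *		kfree(data);
 *	}
 *
 * Note that *data is only handed to the caller on a positive return; the
 * error and no-data paths free it internally.
 */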
int iio_store_to_sw_rb(struct iio_ring_buffer *r, u8 *data, s64 timestamp)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return iio_store_to_sw_ring(ring, data, timestamp);
}
EXPORT_SYMBOL(iio_store_to_sw_rb);
int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
			       unsigned char *data)
{
	unsigned char *last_written_p_copy;

	iio_mark_sw_rb_in_use(&ring->buf);
again:
	barrier();
	last_written_p_copy = ring->last_written_p;
	barrier(); /* unnecessary? */
	/* Check there is anything here */
	if (last_written_p_copy == 0) {
		/* No data yet; drop the use count taken above */
		iio_unmark_sw_rb_in_use(&ring->buf);
		return -EAGAIN;
	}
	memcpy(data, last_written_p_copy, ring->buf.bpd);

	/* Writer moved on mid copy - what we read may be torn, so retry */
	if (unlikely(ring->last_written_p != last_written_p_copy))
		goto again;

	iio_unmark_sw_rb_in_use(&ring->buf);
	return 0;
}
int iio_read_last_from_sw_rb(struct iio_ring_buffer *r,
			     unsigned char *data)
{
	return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}
EXPORT_SYMBOL(iio_read_last_from_sw_rb);
int iio_request_update_sw_rb(struct iio_ring_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	spin_lock(&ring->use_lock);
	if (!ring->update_needed)
		goto error_ret;
	if (ring->use_count) {
		/* Cannot reallocate while someone is using the ring */
		ret = -EAGAIN;
		goto error_ret;
	}
	__iio_free_sw_ring_buffer(ring);
	ret = __iio_init_sw_ring_buffer(ring, ring->buf.bpd, ring->buf.length);
error_ret:
	spin_unlock(&ring->use_lock);
	return ret;
}
EXPORT_SYMBOL(iio_request_update_sw_rb);
int iio_get_bpd_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return ring->buf.bpd;
}
EXPORT_SYMBOL(iio_get_bpd_sw_rb);
int iio_set_bpd_sw_rb(struct iio_ring_buffer *r, size_t bpd)
{
	if (r->bpd != bpd) {
		r->bpd = bpd;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_bpd_sw_rb);
int iio_get_length_sw_rb(struct iio_ring_buffer *r)
{
	return r->length;
}
EXPORT_SYMBOL(iio_get_length_sw_rb);
int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length)
{
	if (r->length != length) {
		r->length = length;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_length_sw_rb);
int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	ring->update_needed = true;
	return 0;
}
EXPORT_SYMBOL(iio_mark_update_needed_sw_rb);
static void iio_sw_rb_release(struct device *dev)
{
	struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
	kfree(iio_to_sw_ring(r));
}

static IIO_RING_ENABLE_ATTR;
static IIO_RING_BPS_ATTR;
static IIO_RING_LENGTH_ATTR;
/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_bps.attr,
	&dev_attr_ring_enable.attr,
	NULL,
};

static struct attribute_group iio_ring_attribute_group = {
	.attrs = iio_ring_attributes,
};

static const struct attribute_group *iio_ring_attribute_groups[] = {
	&iio_ring_attribute_group,
	NULL
};

static struct device_type iio_sw_ring_type = {
	.release = iio_sw_rb_release,
	.groups = iio_ring_attribute_groups,
};
struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *buf;
	struct iio_sw_ring_buffer *ring;

	ring = kzalloc(sizeof *ring, GFP_KERNEL);
	if (!ring)
		return NULL;
	buf = &ring->buf;

	iio_ring_buffer_init(buf, indio_dev);
	buf->dev.type = &iio_sw_ring_type;
	device_initialize(&buf->dev);
	buf->dev.parent = &indio_dev->dev;
	buf->dev.class = &iio_class;
	dev_set_drvdata(&buf->dev, (void *)buf);

	return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);
void iio_sw_rb_free(struct iio_ring_buffer *r)
{
	if (r)
		iio_put_ring_buffer(r);
}
EXPORT_SYMBOL(iio_sw_rb_free);
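/* Lifecycle sketch (editor's illustration, not from the original source):
 * a driver typically pairs the two calls above around device registration:
 *
 *	indio_dev->ring = iio_sw_rb_allocate(indio_dev);
 *	if (!indio_dev->ring)
 *		return -ENOMEM;
 *	...
 *	iio_sw_rb_free(indio_dev->ring);
 *
 * iio_sw_rb_free drops the reference taken by device_initialize(); the
 * structure itself is kfree'd in iio_sw_rb_release once the last
 * reference goes away.
 */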
MODULE_DESCRIPTION("Industrial I/O software ring buffer");
MODULE_LICENSE("GPL");