staging:iio: fix removal path to allow correct freeing.
drivers/staging/iio/ring_sw.c
/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include "ring_sw.h"
#include "trigger.h"

/**
 * struct iio_sw_ring_buffer - software ring buffer
 * @buf: generic ring buffer elements
 * @data: the ring buffer memory
 * @read_p: read pointer (oldest available)
 * @write_p: write pointer
 * @last_written_p: pointer to the newest available value
 * @half_p: half buffer length behind write_p (event generation)
 * @use_count: reference count to prevent resizing when in use
 * @update_needed: flag to indicate a change in size has been requested
 * @use_lock: lock to prevent a change in size when in use
 *
 * Note that the first element of all ring buffers must be a
 * struct iio_buffer.
 */
struct iio_sw_ring_buffer {
        struct iio_buffer buf;
        unsigned char *data;
        unsigned char *read_p;
        unsigned char *write_p;
        unsigned char *last_written_p;
        /* used to act as a point at which to signal an event */
        unsigned char *half_p;
        int use_count;
        int update_needed;
        spinlock_t use_lock;
};

#define iio_to_sw_ring(r) container_of(r, struct iio_sw_ring_buffer, buf)

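/*
 * Rough sketch of the pointer relationships (illustrative only, not
 * part of the original source):
 *
 *	data                                          data + length*bpd
 *	|.........|============================|......................|
 *	          ^read_p (oldest)             ^write_p (next slot)
 *
 * last_written_p sits one datum behind write_p (the newest value).
 * All pointers advance through the allocation and wrap at the end;
 * readers may observe slightly stale pointers, but never invalid ones.
 */
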
static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
                                                int bytes_per_datum, int length)
{
        if ((length == 0) || (bytes_per_datum == 0))
                return -EINVAL;
        __iio_update_buffer(&ring->buf, bytes_per_datum, length);
        /* may be called under use_lock, so the allocation must be atomic */
        ring->data = kmalloc(length * ring->buf.bytes_per_datum, GFP_ATOMIC);
        ring->read_p = NULL;
        ring->write_p = NULL;
        ring->last_written_p = NULL;
        ring->half_p = NULL;
        return ring->data ? 0 : -ENOMEM;
}

static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
        spin_lock_init(&ring->use_lock);
}

static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
        kfree(ring->data);
}

static void iio_mark_sw_rb_in_use(struct iio_buffer *r)
{
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

        spin_lock(&ring->use_lock);
        ring->use_count++;
        spin_unlock(&ring->use_lock);
}

static void iio_unmark_sw_rb_in_use(struct iio_buffer *r)
{
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

        spin_lock(&ring->use_lock);
        ring->use_count--;
        spin_unlock(&ring->use_lock);
}

/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt
 * handler in the device driver */
/* Lock always held if there is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
                                unsigned char *data, s64 timestamp)
{
        int ret = 0;
        unsigned char *temp_ptr, *change_test_ptr;

        /* initial store */
        if (unlikely(ring->write_p == NULL)) {
                ring->write_p = ring->data;
                /* Doesn't actually matter if this is out of the set
                 * as long as the read pointer is valid before this
                 * passes it - guaranteed as set later in this function.
                 */
                ring->half_p = ring->data -
                        ring->buf.length * ring->buf.bytes_per_datum / 2;
        }
        /* Copy data to wherever the current write pointer says */
        memcpy(ring->write_p, data, ring->buf.bytes_per_datum);
        barrier();
        /* Update the pointer used to get the most recent value.
         * Always valid as it points to either the latest or the second
         * latest value. Before this runs it is NULL and read attempts
         * fail with -EAGAIN.
         */
        ring->last_written_p = ring->write_p;
        barrier();
        /* temp_ptr used to ensure we never have an invalid pointer;
         * it may be slightly lagging, but is never invalid
         */
        temp_ptr = ring->write_p + ring->buf.bytes_per_datum;
        /* End of ring, back to the beginning */
        if (temp_ptr == ring->data + ring->buf.length * ring->buf.bytes_per_datum)
                temp_ptr = ring->data;
        /* Update the write pointer.
         * Always valid as long as this is the only function able to write.
         * Care needed with smp systems to ensure more than one ring fill
         * is never scheduled.
         */
        ring->write_p = temp_ptr;

        if (ring->read_p == NULL)
                ring->read_p = ring->data;
        /* Buffer full - move the read pointer and create / escalate
         * ring event */
        /* Tricky case - if the read pointer moves before we adjust it.
         * Handle by not pushing if it has moved - may result in occasional
         * unnecessary buffer full events when it wasn't quite true.
         */
        else if (ring->write_p == ring->read_p) {
                change_test_ptr = ring->read_p;
                temp_ptr = change_test_ptr + ring->buf.bytes_per_datum;
                if (temp_ptr
                    == ring->data + ring->buf.length * ring->buf.bytes_per_datum)
                        temp_ptr = ring->data;
                /* We are moving the pointer on by one because the ring is
                 * full. Any change to the read pointer will be this or
                 * greater.
                 */
                if (change_test_ptr == ring->read_p)
                        ring->read_p = temp_ptr;
        }
        /* investigate if our event barrier has been passed */
        /* There are definite 'issues' with this and chances of
         * simultaneous read */
        /* Also need to use loop count to ensure this only happens once */
        ring->half_p += ring->buf.bytes_per_datum;
        if (ring->half_p == ring->data + ring->buf.length * ring->buf.bytes_per_datum)
                ring->half_p = ring->data;
        if (ring->half_p == ring->read_p) {
                ring->buf.stufftoread = true;
                wake_up_interruptible(&ring->buf.pollq);
        }
        return ret;
}

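/*
 * Illustrative sketch only (an assumption about typical use, not code
 * from this file): a driver with this ring installed as
 * indio_dev->buffer would push each new scan from its trigger handler
 * through the access functions, e.g.
 *
 *	ret = indio_dev->buffer->access->store_to(indio_dev->buffer,
 *						  scan, timestamp);
 *
 * Only one such writer may run at a time; see the locking notes above.
 */
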
static int iio_read_first_n_sw_rb(struct iio_buffer *r,
                                  size_t n, char __user *buf)
{
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

        u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
        u8 *data;
        int ret, max_copied, bytes_to_rip, dead_offset;

        /* A userspace program has probably made an error if it tries to
         * read something that is not a whole number of bpds.
         * Return an error.
         */
        if (n % ring->buf.bytes_per_datum) {
                ret = -EINVAL;
                printk(KERN_INFO "Ring buffer read request not whole number of"
                       " samples: Request bytes %zd, Current bytes per datum %d\n",
                       n, ring->buf.bytes_per_datum);
                goto error_ret;
        }
        /* Limit size to whole of ring buffer */
        bytes_to_rip = min((size_t)(ring->buf.bytes_per_datum * ring->buf.length),
                           n);

        data = kmalloc(bytes_to_rip, GFP_KERNEL);
        if (data == NULL) {
                ret = -ENOMEM;
                goto error_ret;
        }

        /* build local copy */
        initial_read_p = ring->read_p;
        if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
                ret = 0;
                goto error_free_data_cpy;
        }

        initial_write_p = ring->write_p;

        /* Need a consistent pair */
        while ((initial_read_p != ring->read_p)
               || (initial_write_p != ring->write_p)) {
                initial_read_p = ring->read_p;
                initial_write_p = ring->write_p;
        }
        if (initial_write_p == initial_read_p) {
                /* No new data available. */
                ret = 0;
                goto error_free_data_cpy;
        }

        if (initial_write_p >= initial_read_p + bytes_to_rip) {
                /* write_p is greater than necessary, all is easy */
                max_copied = bytes_to_rip;
                memcpy(data, initial_read_p, max_copied);
                end_read_p = initial_read_p + max_copied;
        } else if (initial_write_p > initial_read_p) {
                /* not enough data to copy */
                max_copied = initial_write_p - initial_read_p;
                memcpy(data, initial_read_p, max_copied);
                end_read_p = initial_write_p;
        } else {
                /* going through 'end' of ring buffer */
                max_copied = ring->data
                        + ring->buf.length * ring->buf.bytes_per_datum
                        - initial_read_p;
                memcpy(data, initial_read_p, max_copied);
                /* possible we are done if we align precisely with end */
                if (max_copied == bytes_to_rip)
                        end_read_p = ring->data;
                else if (initial_write_p
                         > ring->data + bytes_to_rip - max_copied) {
                        /* enough data to finish */
                        memcpy(data + max_copied, ring->data,
                               bytes_to_rip - max_copied);
                        /* Compute the new read position before updating
                         * max_copied, otherwise the offset from ring->data
                         * is always zero. */
                        end_read_p = ring->data + (bytes_to_rip - max_copied);
                        max_copied = bytes_to_rip;
                } else { /* not enough data */
                        memcpy(data + max_copied, ring->data,
                               initial_write_p - ring->data);
                        max_copied += initial_write_p - ring->data;
                        end_read_p = initial_write_p;
                }
        }
        /* Now to verify which section was cleanly copied - i.e. how far
         * the read pointer has been pushed */
        current_read_p = ring->read_p;

        if (initial_read_p <= current_read_p)
                dead_offset = current_read_p - initial_read_p;
        else
                dead_offset = ring->buf.length * ring->buf.bytes_per_datum
                        - (initial_read_p - current_read_p);

        /* possible issue if the initial write has been lapped or indeed
         * the point we were reading to has been passed */
        /* No valid data read.
         * In this case the read pointer is already correct having been
         * pushed further than we would look. */
        if (max_copied - dead_offset < 0) {
                ret = 0;
                goto error_free_data_cpy;
        }

        /* setup the next read position */
        /* Beware, this may fail due to concurrency fun and games.
         * It is possible that sufficient fill commands have run to push
         * the read pointer past where we would be after the rip. If this
         * occurs, leave it be.
         */
        /* Tricky - deal with loops */

        while (ring->read_p != end_read_p)
                ring->read_p = end_read_p;

        ret = max_copied - dead_offset;

        if (copy_to_user(buf, data + dead_offset, ret)) {
                ret = -EFAULT;
                goto error_free_data_cpy;
        }

        if (bytes_to_rip >= ring->buf.length * ring->buf.bytes_per_datum / 2)
                ring->buf.stufftoread = false;

error_free_data_cpy:
        kfree(data);
error_ret:
        return ret;
}

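/*
 * For orientation (an assumption about typical use, not part of this
 * file): userspace consumes the ring through the buffer's character
 * device, reading whole data only, e.g.
 *
 *	char rx[16 * bytes_per_datum];
 *	ssize_t n = read(fd, rx, sizeof(rx));
 *
 * A request that is not a multiple of bytes_per_datum fails with
 * -EINVAL, as enforced at the top of iio_read_first_n_sw_rb() above.
 */
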
static int iio_store_to_sw_rb(struct iio_buffer *r,
                              u8 *data,
                              s64 timestamp)
{
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

        return iio_store_to_sw_ring(ring, data, timestamp);
}

static int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
                                      unsigned char *data)
{
        unsigned char *last_written_p_copy;

        iio_mark_sw_rb_in_use(&ring->buf);
again:
        barrier();
        last_written_p_copy = ring->last_written_p;
        barrier(); /* unnecessary? */
        /* Check there is anything here */
        if (last_written_p_copy == NULL) {
                /* drop the use count taken above before bailing out */
                iio_unmark_sw_rb_in_use(&ring->buf);
                return -EAGAIN;
        }
        memcpy(data, last_written_p_copy, ring->buf.bytes_per_datum);

        if (unlikely(ring->last_written_p != last_written_p_copy))
                goto again;

        iio_unmark_sw_rb_in_use(&ring->buf);
        return 0;
}

static int iio_read_last_from_sw_rb(struct iio_buffer *r,
                                    unsigned char *data)
{
        return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}

static int iio_request_update_sw_rb(struct iio_buffer *r)
{
        int ret = 0;
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

        r->stufftoread = false;
        spin_lock(&ring->use_lock);
        if (!ring->update_needed)
                goto error_ret;
        if (ring->use_count) {
                ret = -EAGAIN;
                goto error_ret;
        }
        __iio_free_sw_ring_buffer(ring);
        ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bytes_per_datum,
                                            ring->buf.length);
error_ret:
        spin_unlock(&ring->use_lock);
        return ret;
}

static int iio_get_bytes_per_datum_sw_rb(struct iio_buffer *r)
{
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

        return ring->buf.bytes_per_datum;
}

static int iio_set_bytes_per_datum_sw_rb(struct iio_buffer *r, size_t bpd)
{
        if (r->bytes_per_datum != bpd) {
                r->bytes_per_datum = bpd;
                if (r->access->mark_param_change)
                        r->access->mark_param_change(r);
        }
        return 0;
}

static int iio_get_length_sw_rb(struct iio_buffer *r)
{
        return r->length;
}

static int iio_set_length_sw_rb(struct iio_buffer *r, int length)
{
        if (r->length != length) {
                r->length = length;
                if (r->access->mark_param_change)
                        r->access->mark_param_change(r);
        }
        return 0;
}

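/*
 * The get/set hooks above back the sysfs attributes declared below.
 * Assuming the usual sysfs layout of this era (an illustration, not
 * something this file defines), the ring would be resized from
 * userspace with something like
 *
 *	echo 128 > /sys/bus/iio/devices/iio:device0/buffer/length
 *
 * with the new size taking effect on the next request_update().
 */
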
static int iio_mark_update_needed_sw_rb(struct iio_buffer *r)
{
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

        ring->update_needed = true;
        return 0;
}

static IIO_BUFFER_ENABLE_ATTR;
static IIO_BUFFER_BYTES_PER_DATUM_ATTR;
static IIO_BUFFER_LENGTH_ATTR;

/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
        &dev_attr_length.attr,
        &dev_attr_bytes_per_datum.attr,
        &dev_attr_enable.attr,
        NULL,
};

static struct attribute_group iio_ring_attribute_group = {
        .attrs = iio_ring_attributes,
        .name = "buffer",
};

struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
        struct iio_buffer *buf;
        struct iio_sw_ring_buffer *ring;

        ring = kzalloc(sizeof *ring, GFP_KERNEL);
        if (!ring)
                return NULL;
        ring->update_needed = true;
        buf = &ring->buf;
        iio_buffer_init(buf, indio_dev);
        __iio_init_sw_ring_buffer(ring);
        buf->attrs = &iio_ring_attribute_group;

        return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);

void iio_sw_rb_free(struct iio_buffer *r)
{
        kfree(iio_to_sw_ring(r));
}
EXPORT_SYMBOL(iio_sw_rb_free);

const struct iio_buffer_access_funcs ring_sw_access_funcs = {
        .mark_in_use = &iio_mark_sw_rb_in_use,
        .unmark_in_use = &iio_unmark_sw_rb_in_use,
        .store_to = &iio_store_to_sw_rb,
        .read_last = &iio_read_last_from_sw_rb,
        .read_first_n = &iio_read_first_n_sw_rb,
        .mark_param_change = &iio_mark_update_needed_sw_rb,
        .request_update = &iio_request_update_sw_rb,
        .get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb,
        .set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb,
        .get_length = &iio_get_length_sw_rb,
        .set_length = &iio_set_length_sw_rb,
};
EXPORT_SYMBOL(ring_sw_access_funcs);

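/*
 * Putting the pieces together (an illustrative sketch only, assuming a
 * driver with a struct iio_dev *indio_dev; not code from this file):
 *
 *	indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
 *	if (!indio_dev->buffer)
 *		return -ENOMEM;
 *	indio_dev->buffer->access = &ring_sw_access_funcs;
 *
 * with the matching removal path calling
 * iio_sw_rb_free(indio_dev->buffer) only after the buffer has been
 * unregistered, so the ring memory is freed exactly once.
 */
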
MODULE_DESCRIPTION("Industrial I/O software ring buffer");
MODULE_LICENSE("GPL");