staging:iio: rename ring_generic.h -> buffer_generic.h
drivers/staging/iio/industrialio-ring.c
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of ring allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include "iio.h"
#include "iio_core.h"
#include "sysfs.h"
#include "buffer_generic.h"

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

/**
 * iio_ring_read_first_n_outer() - chrdev read for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
ssize_t iio_ring_read_first_n_outer(struct file *filp, char __user *buf,
				    size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_ring_buffer *rb = indio_dev->ring;

	if (!rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_ring_poll() - poll the ring to find out if it has data
 */
unsigned int iio_ring_poll(struct file *filp,
			   struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_ring_buffer *rb = indio_dev->ring;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

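/* Called from the core chrdev open path: mark the ring buffer as in use. */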
void iio_chrdev_ring_open(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *rb = indio_dev->ring;
	if (rb && rb->access->mark_in_use)
		rb->access->mark_in_use(rb);
}

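/* Called on chrdev release: clear the busy flag and drop the in-use mark. */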
void iio_chrdev_ring_release(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *rb = indio_dev->ring;

	clear_bit(IIO_BUSY_BIT_POS, &rb->flags);
	if (rb->access->unmark_in_use)
		rb->access->unmark_in_use(rb);
}

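/* Tie a ring buffer to its parent device and initialise its poll waitqueue. */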
void iio_ring_buffer_init(struct iio_ring_buffer *ring,
			  struct iio_dev *dev_info)
{
	ring->indio_dev = dev_info;
	init_waitqueue_head(&ring->pollq);
}
EXPORT_SYMBOL(iio_ring_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
		if (__LITTLE_ENDIAN)
			type = IIO_LE;
		else
			type = IIO_BE;
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *dev_info = dev_get_drvdata(dev);

	ret = iio_scan_mask_query(dev_info->ring,
				  to_iio_dev_attr(attr)->address);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_ring_buffer *ring, int bit)
{
	clear_bit(bit, ring->scan_mask);
	ring->scan_count--;
	return 0;
}

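/* Store for a scan element's "en" file: flip its bit in the scan mask. */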
static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret = 0;
	bool state;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_ring_buffer *ring = indio_dev->ring;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(ring, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(ring, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(ring, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *dev_info = dev_get_drvdata(dev);
	return sprintf(buf, "%d\n", dev_info->ring->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret = 0;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	bool state;

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->ring->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

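/*
 * Add the scan_elements attributes (index, type and en) for one channel.
 * Returns the number of attributes created or a negative error code.
 */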
static int iio_ring_add_channel_sysfs(struct iio_dev *indio_dev,
				      const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_ring_buffer *ring = indio_dev->ring;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &ring->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &ring->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &ring->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &ring->scan_el_dev_attr_list);
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}

static void iio_ring_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						   struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static void __iio_ring_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_ring_buffer *ring = indio_dev->ring;

	list_for_each_entry_safe(p, n,
				 &ring->scan_el_dev_attr_list, l)
		iio_ring_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

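/**
 * iio_ring_buffer_register() - register the ring's scan_elements interface
 * @indio_dev: the device the ring buffer belongs to
 * @channels: channel specifications from which the attributes are built
 * @num_channels: number of entries in @channels
 *
 * Builds the scan_elements sysfs group from the supplied channels and
 * allocates a scan mask large enough to cover the highest scan index.
 **/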
int iio_ring_buffer_register(struct iio_dev *indio_dev,
			     const struct iio_chan_spec *channels,
			     int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_ring_buffer *ring = indio_dev->ring;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (ring->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = ring->attrs;

	if (ring->scan_el_attrs != NULL) {
		attr = ring->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&ring->scan_el_dev_attr_list);
	if (channels) {
		/* new magic */
		for (i = 0; i < num_channels; i++) {
			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= indio_dev->channels[i].scan_index + 1;

			ret = iio_ring_add_channel_sysfs(indio_dev,
							 &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
		}
		if (indio_dev->masklength && ring->scan_mask == NULL) {
			ring->scan_mask
				= kzalloc(sizeof(*ring->scan_mask)*
					  BITS_TO_LONGS(indio_dev->masklength),
					  GFP_KERNEL);
			if (ring->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	ring->scan_el_group.name = iio_scan_elements_group_name;

	ring->scan_el_group.attrs
		= kzalloc(sizeof(ring->scan_el_group.attrs[0])*(attrcount + 1),
			  GFP_KERNEL);
	if (ring->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (ring->scan_el_attrs)
		memcpy(ring->scan_el_group.attrs, ring->scan_el_attrs,
		       sizeof(ring->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &ring->scan_el_dev_attr_list, l)
		ring->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &ring->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(ring->scan_mask);
error_cleanup_dynamic:
	__iio_ring_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_ring_buffer_register);

void iio_ring_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->ring->scan_mask);
	kfree(indio_dev->ring->scan_el_group.attrs);
	__iio_ring_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_ring_buffer_unregister);

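/*
 * Exported show/store helpers that drivers wire up to the ring's length,
 * bytes_per_datum and enable sysfs attributes.
 */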
ssize_t iio_read_ring_length(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_ring_buffer *ring = indio_dev->ring;

	if (ring->access->get_length)
		return sprintf(buf, "%d\n",
			       ring->access->get_length(ring));

	return 0;
}
EXPORT_SYMBOL(iio_read_ring_length);

ssize_t iio_write_ring_length(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret;
	ulong val;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_ring_buffer *ring = indio_dev->ring;

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (ring->access->get_length)
		if (val == ring->access->get_length(ring))
			return len;

	if (ring->access->set_length) {
		ring->access->set_length(ring, val);
		if (ring->access->mark_param_change)
			ring->access->mark_param_change(ring);
	}

	return len;
}
EXPORT_SYMBOL(iio_write_ring_length);

ssize_t iio_read_ring_bytes_per_datum(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_ring_buffer *ring = indio_dev->ring;

	if (ring->access->get_bytes_per_datum)
		return sprintf(buf, "%d\n",
			       ring->access->get_bytes_per_datum(ring));

	return 0;
}
EXPORT_SYMBOL(iio_read_ring_bytes_per_datum);

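/*
 * Enable or disable the ring buffer, running the setup_ops callbacks around
 * the state change and switching currentmode between direct and buffered.
 */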
ssize_t iio_store_ring_enable(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret;
	bool requested_state, current_state;
	int previous_mode;
	struct iio_dev *dev_info = dev_get_drvdata(dev);
	struct iio_ring_buffer *ring = dev_info->ring;

	mutex_lock(&dev_info->mlock);
	previous_mode = dev_info->currentmode;
	requested_state = !(buf[0] == '0');
	current_state = !!(previous_mode & INDIO_ALL_BUFFER_MODES);
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-ring, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (ring->setup_ops->preenable) {
			ret = ring->setup_ops->preenable(dev_info);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started:"
				       "ring preenable failed\n");
				goto error_ret;
			}
		}
		if (ring->access->request_update) {
			ret = ring->access->request_update(ring);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started:"
				       "ring parameter update failed\n");
				goto error_ret;
			}
		}
		if (ring->access->mark_in_use)
			ring->access->mark_in_use(ring);
		/* Definitely possible for devices to support both of these.*/
		if (dev_info->modes & INDIO_BUFFER_TRIGGERED) {
			if (!dev_info->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				if (ring->access->unmark_in_use)
					ring->access->unmark_in_use(ring);
				goto error_ret;
			}
			dev_info->currentmode = INDIO_BUFFER_TRIGGERED;
		} else if (dev_info->modes & INDIO_BUFFER_HARDWARE)
			dev_info->currentmode = INDIO_BUFFER_HARDWARE;
		else { /* should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (ring->setup_ops->postenable) {
			ret = ring->setup_ops->postenable(dev_info);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started:"
				       "postenable failed\n");
				if (ring->access->unmark_in_use)
					ring->access->unmark_in_use(ring);
				dev_info->currentmode = previous_mode;
				if (ring->setup_ops->postdisable)
					ring->setup_ops->postdisable(dev_info);
				goto error_ret;
			}
		}
	} else {
		if (ring->setup_ops->predisable) {
			ret = ring->setup_ops->predisable(dev_info);
			if (ret)
				goto error_ret;
		}
		if (ring->access->unmark_in_use)
			ring->access->unmark_in_use(ring);
		dev_info->currentmode = INDIO_DIRECT_MODE;
		if (ring->setup_ops->postdisable) {
			ret = ring->setup_ops->postdisable(dev_info);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&dev_info->mlock);
	return len;

error_ret:
	mutex_unlock(&dev_info->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_store_ring_enable);

ssize_t iio_show_ring_enable(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct iio_dev *dev_info = dev_get_drvdata(dev);
	return sprintf(buf, "%d\n", !!(dev_info->currentmode
				       & INDIO_ALL_BUFFER_MODES));
}
EXPORT_SYMBOL(iio_show_ring_enable);

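/*
 * Default preenable for software rings: compute the datum size from the
 * enabled scan elements (plus an s64-aligned timestamp if requested) and
 * pass it to the buffer implementation.
 */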
int iio_sw_ring_preenable(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *ring = indio_dev->ring;
	size_t size;
	dev_dbg(&indio_dev->dev, "%s\n", __func__);
	/* Check if there are any scan elements enabled, if not fail*/
	if (!(ring->scan_count || ring->scan_timestamp))
		return -EINVAL;
	if (ring->scan_timestamp)
		if (ring->scan_count)
			/* Timestamp (aligned to s64) and data */
			size = (((ring->scan_count * ring->bpe)
				 + sizeof(s64) - 1)
				& ~(sizeof(s64) - 1))
				+ sizeof(s64);
		else /* Timestamp only */
			size = sizeof(s64);
	else /* Data only */
		size = ring->scan_count * ring->bpe;
	ring->access->set_bytes_per_datum(ring, size);

	return 0;
}
EXPORT_SYMBOL(iio_sw_ring_preenable);

/* note NULL used as error indicator as it doesn't make sense. */
static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
					  unsigned int masklength,
					  unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @ring: the ring buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 **/
int iio_scan_mask_set(struct iio_ring_buffer *ring, int bit)
{
	struct iio_dev *dev_info = ring->indio_dev;
	unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(dev_info->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!dev_info->masklength) {
		WARN_ON("trying to set scan mask prior to registering ring\n");
		kfree(trialmask);
		return -EINVAL;
	}
	bitmap_copy(trialmask, ring->scan_mask, dev_info->masklength);
	set_bit(bit, trialmask);

	if (dev_info->available_scan_masks) {
		mask = iio_scan_mask_match(dev_info->available_scan_masks,
					   dev_info->masklength,
					   trialmask);
		if (!mask) {
			kfree(trialmask);
			return -EINVAL;
		}
	}
	bitmap_copy(ring->scan_mask, trialmask, dev_info->masklength);
	ring->scan_count++;

	kfree(trialmask);

	return 0;
};
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

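/**
 * iio_scan_mask_query() - query state of a particular bit in the scan mask
 * @ring: the ring buffer whose scan mask we are interested in
 * @bit: the bit to be queried
 **/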
int iio_scan_mask_query(struct iio_ring_buffer *ring, int bit)
{
	struct iio_dev *dev_info = ring->indio_dev;
	long *mask;

	if (bit > dev_info->masklength)
		return -EINVAL;

	if (!ring->scan_mask)
		return 0;
	if (dev_info->available_scan_masks)
		mask = iio_scan_mask_match(dev_info->available_scan_masks,
					   dev_info->masklength,
					   ring->scan_mask);
	else
		mask = ring->scan_mask;
	if (!mask)
		return 0;

	return test_bit(bit, mask);
};
EXPORT_SYMBOL_GPL(iio_scan_mask_query);