/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of ring allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include "iio.h"
#include "sysfs.h"
#include "buffer_generic.h"
28 static const char * const iio_endian_prefix
[] = {
34 * iio_ring_read_first_n_outer() - chrdev read for ring buffer access
36 * This function relies on all ring buffer implementations having an
37 * iio_ring _bufer as their first element.
39 ssize_t
iio_ring_read_first_n_outer(struct file
*filp
, char __user
*buf
,
40 size_t n
, loff_t
*f_ps
)
42 struct iio_dev
*indio_dev
= filp
->private_data
;
43 struct iio_ring_buffer
*rb
= indio_dev
->ring
;
45 if (!rb
->access
->read_first_n
)
47 return rb
->access
->read_first_n(rb
, n
, buf
);
51 * iio_ring_poll() - poll the ring to find out if it has data
53 unsigned int iio_ring_poll(struct file
*filp
,
54 struct poll_table_struct
*wait
)
56 struct iio_dev
*indio_dev
= filp
->private_data
;
57 struct iio_ring_buffer
*rb
= indio_dev
->ring
;
59 poll_wait(filp
, &rb
->pollq
, wait
);
61 return POLLIN
| POLLRDNORM
;
62 /* need a way of knowing if there may be enough data... */
66 void iio_chrdev_ring_open(struct iio_dev
*indio_dev
)
68 struct iio_ring_buffer
*rb
= indio_dev
->ring
;
69 if (rb
&& rb
->access
->mark_in_use
)
70 rb
->access
->mark_in_use(rb
);
73 void iio_chrdev_ring_release(struct iio_dev
*indio_dev
)
75 struct iio_ring_buffer
*rb
= indio_dev
->ring
;
77 clear_bit(IIO_BUSY_BIT_POS
, &rb
->flags
);
78 if (rb
->access
->unmark_in_use
)
79 rb
->access
->unmark_in_use(rb
);
83 void iio_ring_buffer_init(struct iio_ring_buffer
*ring
,
84 struct iio_dev
*dev_info
)
86 ring
->indio_dev
= dev_info
;
87 init_waitqueue_head(&ring
->pollq
);
89 EXPORT_SYMBOL(iio_ring_buffer_init
);
91 static ssize_t
iio_show_scan_index(struct device
*dev
,
92 struct device_attribute
*attr
,
95 return sprintf(buf
, "%u\n", to_iio_dev_attr(attr
)->c
->scan_index
);
98 static ssize_t
iio_show_fixed_type(struct device
*dev
,
99 struct device_attribute
*attr
,
102 struct iio_dev_attr
*this_attr
= to_iio_dev_attr(attr
);
103 u8 type
= this_attr
->c
->scan_type
.endianness
;
105 if (type
== IIO_CPU
) {
111 return sprintf(buf
, "%s:%c%d/%d>>%u\n",
112 iio_endian_prefix
[type
],
113 this_attr
->c
->scan_type
.sign
,
114 this_attr
->c
->scan_type
.realbits
,
115 this_attr
->c
->scan_type
.storagebits
,
116 this_attr
->c
->scan_type
.shift
);
119 static ssize_t
iio_scan_el_show(struct device
*dev
,
120 struct device_attribute
*attr
,
124 struct iio_dev
*dev_info
= dev_get_drvdata(dev
);
126 ret
= iio_scan_mask_query(dev_info
->ring
,
127 to_iio_dev_attr(attr
)->address
);
130 return sprintf(buf
, "%d\n", ret
);
133 static int iio_scan_mask_clear(struct iio_ring_buffer
*ring
, int bit
)
135 clear_bit(bit
, ring
->scan_mask
);
140 static ssize_t
iio_scan_el_store(struct device
*dev
,
141 struct device_attribute
*attr
,
147 struct iio_dev
*indio_dev
= dev_get_drvdata(dev
);
148 struct iio_ring_buffer
*ring
= indio_dev
->ring
;
149 struct iio_dev_attr
*this_attr
= to_iio_dev_attr(attr
);
151 state
= !(buf
[0] == '0');
152 mutex_lock(&indio_dev
->mlock
);
153 if (indio_dev
->currentmode
== INDIO_BUFFER_TRIGGERED
) {
157 ret
= iio_scan_mask_query(ring
, this_attr
->address
);
161 ret
= iio_scan_mask_clear(ring
, this_attr
->address
);
164 } else if (state
&& !ret
) {
165 ret
= iio_scan_mask_set(ring
, this_attr
->address
);
171 mutex_unlock(&indio_dev
->mlock
);
173 return ret
? ret
: len
;
177 static ssize_t
iio_scan_el_ts_show(struct device
*dev
,
178 struct device_attribute
*attr
,
181 struct iio_dev
*dev_info
= dev_get_drvdata(dev
);
182 return sprintf(buf
, "%d\n", dev_info
->ring
->scan_timestamp
);
185 static ssize_t
iio_scan_el_ts_store(struct device
*dev
,
186 struct device_attribute
*attr
,
191 struct iio_dev
*indio_dev
= dev_get_drvdata(dev
);
194 state
= !(buf
[0] == '0');
195 mutex_lock(&indio_dev
->mlock
);
196 if (indio_dev
->currentmode
== INDIO_BUFFER_TRIGGERED
) {
200 indio_dev
->ring
->scan_timestamp
= state
;
202 mutex_unlock(&indio_dev
->mlock
);
204 return ret
? ret
: len
;
207 static int iio_ring_add_channel_sysfs(struct iio_dev
*indio_dev
,
208 const struct iio_chan_spec
*chan
)
210 int ret
, attrcount
= 0;
211 struct iio_ring_buffer
*ring
= indio_dev
->ring
;
213 ret
= __iio_add_chan_devattr("index",
215 &iio_show_scan_index
,
220 &ring
->scan_el_dev_attr_list
);
224 ret
= __iio_add_chan_devattr("type",
226 &iio_show_fixed_type
,
231 &ring
->scan_el_dev_attr_list
);
235 if (chan
->type
!= IIO_TIMESTAMP
)
236 ret
= __iio_add_chan_devattr("en",
243 &ring
->scan_el_dev_attr_list
);
245 ret
= __iio_add_chan_devattr("en",
247 &iio_scan_el_ts_show
,
248 &iio_scan_el_ts_store
,
252 &ring
->scan_el_dev_attr_list
);
259 static void iio_ring_remove_and_free_scan_dev_attr(struct iio_dev
*indio_dev
,
260 struct iio_dev_attr
*p
)
262 kfree(p
->dev_attr
.attr
.name
);
266 static void __iio_ring_attr_cleanup(struct iio_dev
*indio_dev
)
268 struct iio_dev_attr
*p
, *n
;
269 struct iio_ring_buffer
*ring
= indio_dev
->ring
;
271 list_for_each_entry_safe(p
, n
,
272 &ring
->scan_el_dev_attr_list
, l
)
273 iio_ring_remove_and_free_scan_dev_attr(indio_dev
, p
);
/* Name of the sysfs group holding the per-channel scan attributes. */
static const char * const iio_scan_elements_group_name = "scan_elements";
278 int iio_ring_buffer_register(struct iio_dev
*indio_dev
,
279 const struct iio_chan_spec
*channels
,
282 struct iio_dev_attr
*p
;
283 struct attribute
**attr
;
284 struct iio_ring_buffer
*ring
= indio_dev
->ring
;
285 int ret
, i
, attrn
, attrcount
, attrcount_orig
= 0;
288 indio_dev
->groups
[indio_dev
->groupcounter
++] = ring
->attrs
;
290 if (ring
->scan_el_attrs
!= NULL
) {
291 attr
= ring
->scan_el_attrs
->attrs
;
292 while (*attr
++ != NULL
)
295 attrcount
= attrcount_orig
;
296 INIT_LIST_HEAD(&ring
->scan_el_dev_attr_list
);
299 for (i
= 0; i
< num_channels
; i
++) {
300 /* Establish necessary mask length */
301 if (channels
[i
].scan_index
>
302 (int)indio_dev
->masklength
- 1)
303 indio_dev
->masklength
304 = indio_dev
->channels
[i
].scan_index
+ 1;
306 ret
= iio_ring_add_channel_sysfs(indio_dev
,
309 goto error_cleanup_dynamic
;
312 if (indio_dev
->masklength
&& ring
->scan_mask
== NULL
) {
314 = kzalloc(sizeof(*ring
->scan_mask
)*
315 BITS_TO_LONGS(indio_dev
->masklength
),
317 if (ring
->scan_mask
== NULL
) {
319 goto error_cleanup_dynamic
;
324 ring
->scan_el_group
.name
= iio_scan_elements_group_name
;
326 ring
->scan_el_group
.attrs
327 = kzalloc(sizeof(ring
->scan_el_group
.attrs
[0])*(attrcount
+ 1),
329 if (ring
->scan_el_group
.attrs
== NULL
) {
331 goto error_free_scan_mask
;
333 if (ring
->scan_el_attrs
)
334 memcpy(ring
->scan_el_group
.attrs
, ring
->scan_el_attrs
,
335 sizeof(ring
->scan_el_group
.attrs
[0])*attrcount_orig
);
336 attrn
= attrcount_orig
;
338 list_for_each_entry(p
, &ring
->scan_el_dev_attr_list
, l
)
339 ring
->scan_el_group
.attrs
[attrn
++] = &p
->dev_attr
.attr
;
340 indio_dev
->groups
[indio_dev
->groupcounter
++] = &ring
->scan_el_group
;
344 error_free_scan_mask
:
345 kfree(ring
->scan_mask
);
346 error_cleanup_dynamic
:
347 __iio_ring_attr_cleanup(indio_dev
);
351 EXPORT_SYMBOL(iio_ring_buffer_register
);
353 void iio_ring_buffer_unregister(struct iio_dev
*indio_dev
)
355 kfree(indio_dev
->ring
->scan_mask
);
356 kfree(indio_dev
->ring
->scan_el_group
.attrs
);
357 __iio_ring_attr_cleanup(indio_dev
);
359 EXPORT_SYMBOL(iio_ring_buffer_unregister
);
361 ssize_t
iio_read_ring_length(struct device
*dev
,
362 struct device_attribute
*attr
,
365 struct iio_dev
*indio_dev
= dev_get_drvdata(dev
);
366 struct iio_ring_buffer
*ring
= indio_dev
->ring
;
368 if (ring
->access
->get_length
)
369 return sprintf(buf
, "%d\n",
370 ring
->access
->get_length(ring
));
374 EXPORT_SYMBOL(iio_read_ring_length
);
376 ssize_t
iio_write_ring_length(struct device
*dev
,
377 struct device_attribute
*attr
,
383 struct iio_dev
*indio_dev
= dev_get_drvdata(dev
);
384 struct iio_ring_buffer
*ring
= indio_dev
->ring
;
386 ret
= strict_strtoul(buf
, 10, &val
);
390 if (ring
->access
->get_length
)
391 if (val
== ring
->access
->get_length(ring
))
394 if (ring
->access
->set_length
) {
395 ring
->access
->set_length(ring
, val
);
396 if (ring
->access
->mark_param_change
)
397 ring
->access
->mark_param_change(ring
);
402 EXPORT_SYMBOL(iio_write_ring_length
);
404 ssize_t
iio_read_ring_bytes_per_datum(struct device
*dev
,
405 struct device_attribute
*attr
,
408 struct iio_dev
*indio_dev
= dev_get_drvdata(dev
);
409 struct iio_ring_buffer
*ring
= indio_dev
->ring
;
411 if (ring
->access
->get_bytes_per_datum
)
412 return sprintf(buf
, "%d\n",
413 ring
->access
->get_bytes_per_datum(ring
));
417 EXPORT_SYMBOL(iio_read_ring_bytes_per_datum
);
419 ssize_t
iio_store_ring_enable(struct device
*dev
,
420 struct device_attribute
*attr
,
425 bool requested_state
, current_state
;
427 struct iio_dev
*dev_info
= dev_get_drvdata(dev
);
428 struct iio_ring_buffer
*ring
= dev_info
->ring
;
430 mutex_lock(&dev_info
->mlock
);
431 previous_mode
= dev_info
->currentmode
;
432 requested_state
= !(buf
[0] == '0');
433 current_state
= !!(previous_mode
& INDIO_ALL_BUFFER_MODES
);
434 if (current_state
== requested_state
) {
435 printk(KERN_INFO
"iio-ring, current state requested again\n");
438 if (requested_state
) {
439 if (ring
->setup_ops
->preenable
) {
440 ret
= ring
->setup_ops
->preenable(dev_info
);
443 "Buffer not started:"
444 "ring preenable failed\n");
448 if (ring
->access
->request_update
) {
449 ret
= ring
->access
->request_update(ring
);
452 "Buffer not started:"
453 "ring parameter update failed\n");
457 if (ring
->access
->mark_in_use
)
458 ring
->access
->mark_in_use(ring
);
459 /* Definitely possible for devices to support both of these.*/
460 if (dev_info
->modes
& INDIO_BUFFER_TRIGGERED
) {
461 if (!dev_info
->trig
) {
463 "Buffer not started: no trigger\n");
465 if (ring
->access
->unmark_in_use
)
466 ring
->access
->unmark_in_use(ring
);
469 dev_info
->currentmode
= INDIO_BUFFER_TRIGGERED
;
470 } else if (dev_info
->modes
& INDIO_BUFFER_HARDWARE
)
471 dev_info
->currentmode
= INDIO_BUFFER_HARDWARE
;
472 else { /* should never be reached */
477 if (ring
->setup_ops
->postenable
) {
478 ret
= ring
->setup_ops
->postenable(dev_info
);
481 "Buffer not started:"
482 "postenable failed\n");
483 if (ring
->access
->unmark_in_use
)
484 ring
->access
->unmark_in_use(ring
);
485 dev_info
->currentmode
= previous_mode
;
486 if (ring
->setup_ops
->postdisable
)
487 ring
->setup_ops
->postdisable(dev_info
);
492 if (ring
->setup_ops
->predisable
) {
493 ret
= ring
->setup_ops
->predisable(dev_info
);
497 if (ring
->access
->unmark_in_use
)
498 ring
->access
->unmark_in_use(ring
);
499 dev_info
->currentmode
= INDIO_DIRECT_MODE
;
500 if (ring
->setup_ops
->postdisable
) {
501 ret
= ring
->setup_ops
->postdisable(dev_info
);
507 mutex_unlock(&dev_info
->mlock
);
511 mutex_unlock(&dev_info
->mlock
);
514 EXPORT_SYMBOL(iio_store_ring_enable
);
516 ssize_t
iio_show_ring_enable(struct device
*dev
,
517 struct device_attribute
*attr
,
520 struct iio_dev
*dev_info
= dev_get_drvdata(dev
);
521 return sprintf(buf
, "%d\n", !!(dev_info
->currentmode
522 & INDIO_ALL_BUFFER_MODES
));
524 EXPORT_SYMBOL(iio_show_ring_enable
);
526 int iio_sw_ring_preenable(struct iio_dev
*indio_dev
)
528 struct iio_ring_buffer
*ring
= indio_dev
->ring
;
530 dev_dbg(&indio_dev
->dev
, "%s\n", __func__
);
531 /* Check if there are any scan elements enabled, if not fail*/
532 if (!(ring
->scan_count
|| ring
->scan_timestamp
))
534 if (ring
->scan_timestamp
)
535 if (ring
->scan_count
)
536 /* Timestamp (aligned to s64) and data */
537 size
= (((ring
->scan_count
* ring
->bpe
)
539 & ~(sizeof(s64
) - 1))
541 else /* Timestamp only */
544 size
= ring
->scan_count
* ring
->bpe
;
545 ring
->access
->set_bytes_per_datum(ring
, size
);
549 EXPORT_SYMBOL(iio_sw_ring_preenable
);
552 /* note NULL used as error indicator as it doesn't make sense. */
553 static unsigned long *iio_scan_mask_match(unsigned long *av_masks
,
554 unsigned int masklength
,
557 if (bitmap_empty(mask
, masklength
))
560 if (bitmap_subset(mask
, av_masks
, masklength
))
562 av_masks
+= BITS_TO_LONGS(masklength
);
568 * iio_scan_mask_set() - set particular bit in the scan mask
569 * @ring: the ring buffer whose scan mask we are interested in
570 * @bit: the bit to be set.
572 int iio_scan_mask_set(struct iio_ring_buffer
*ring
, int bit
)
574 struct iio_dev
*dev_info
= ring
->indio_dev
;
576 unsigned long *trialmask
;
578 trialmask
= kmalloc(sizeof(*trialmask
)*
579 BITS_TO_LONGS(dev_info
->masklength
),
582 if (trialmask
== NULL
)
584 if (!dev_info
->masklength
) {
585 WARN_ON("trying to set scan mask prior to registering ring\n");
589 bitmap_copy(trialmask
, ring
->scan_mask
, dev_info
->masklength
);
590 set_bit(bit
, trialmask
);
592 if (dev_info
->available_scan_masks
) {
593 mask
= iio_scan_mask_match(dev_info
->available_scan_masks
,
594 dev_info
->masklength
,
601 bitmap_copy(ring
->scan_mask
, trialmask
, dev_info
->masklength
);
608 EXPORT_SYMBOL_GPL(iio_scan_mask_set
);
610 int iio_scan_mask_query(struct iio_ring_buffer
*ring
, int bit
)
612 struct iio_dev
*dev_info
= ring
->indio_dev
;
615 if (bit
> dev_info
->masklength
)
618 if (!ring
->scan_mask
)
620 if (dev_info
->available_scan_masks
)
621 mask
= iio_scan_mask_match(dev_info
->available_scan_masks
,
622 dev_info
->masklength
,
625 mask
= ring
->scan_mask
;
629 return test_bit(bit
, mask
);
631 EXPORT_SYMBOL_GPL(iio_scan_mask_query
);