/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};
static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}
/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}
/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);
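
/*
 * Illustrative sketch (not part of this file): a buffer implementation is
 * expected to embed struct iio_buffer as its first member and call
 * iio_buffer_init() once at allocation time, before publishing the buffer.
 * The names my_buffer and my_access_funcs below are hypothetical.
 *
 *	struct my_buffer {
 *		struct iio_buffer buffer;	// must come first
 *		...
 *	};
 *
 *	struct my_buffer *mb = kzalloc(sizeof(*mb), GFP_KERNEL);
 *	if (!mb)
 *		return NULL;
 *	iio_buffer_init(&mb->buffer);		// lists, waitqueue, kref
 *	mb->buffer.access = &my_access_funcs;	// ops table supplied by driver
 */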
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}
static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}
static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}
static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}
static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	return attrcount;
}
static const char * const iio_scan_elements_group_name = "scan_elements";
int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
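
/*
 * Illustrative sketch (hypothetical driver code): a driver that provides its
 * own buffer typically attaches it and then registers the scan element
 * attributes from probe(), using the same channel array it hands to the core:
 *
 *	indio_dev->buffer = my_allocate_buffer();	// hypothetical helper
 *	ret = iio_buffer_register(indio_dev,
 *				  my_channels,
 *				  ARRAY_SIZE(my_channels));
 *	if (ret)
 *		goto error_free_buffer;
 *
 * iio_buffer_unregister() below undoes this on the remove path.
 */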
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
EXPORT_SYMBOL(iio_buffer_unregister);
ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);
ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);
ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);
/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
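
/*
 * Worked example (hypothetical masks): a device whose hardware can only scan
 * adjacent channel pairs might publish available_scan_masks = { 0x3, 0xc, 0 }.
 * A request for just channel 0 (mask 0x1) is a subset of 0x3, so 0x3 is
 * returned and the unwanted samples are later stripped by the demux code. A
 * request for channels 0 and 2 (mask 0x5) is a subset of neither entry, so
 * NULL is returned. The zero entry terminates the walk over av_masks.
 */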
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
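
/*
 * Worked example of the alignment arithmetic above (hypothetical channels):
 * two 16-bit channels (storagebits = 16) plus a 64-bit timestamp give
 *
 *	bytes = ALIGN(0, 2) + 2 = 2	// first channel
 *	bytes = ALIGN(2, 2) + 2 = 4	// second channel
 *	bytes = ALIGN(4, 8) + 8 = 16	// timestamp, padded to its own size
 *
 * so each demuxed scan occupies 16 bytes, 4 of which are padding.
 */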
static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	iio_buffer_put(buffer);
}
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}
static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}
static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				ret = -EINVAL;
				goto error_ret;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO
			       "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:
	return ret;
}
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
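
/*
 * Illustrative sketch (hypothetical caller): in-kernel users attach or detach
 * a buffer by passing it as insert_buffer or remove_buffer respectively, e.g.
 *
 *	ret = iio_update_buffers(indio_dev, my_buffer, NULL);	// attach
 *	...
 *	ret = iio_update_buffers(indio_dev, NULL, my_buffer);	// detach
 *
 * where my_buffer is a buffer the caller allocated earlier. The two locks
 * taken above make this safe against concurrent sysfs enable/disable.
 */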
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					   indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					   NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);
/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
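
/*
 * Illustrative sketch (hypothetical driver code): a driver whose hardware can
 * sample only one channel at a time plugs this helper into its setup ops:
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.preenable = my_preenable,	// hypothetical callbacks
 *		.postdisable = my_postdisable,
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 *
 * iio_scan_mask_set() below then rejects any attempt to enable a second
 * channel.
 */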
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);
int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}
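
/*
 * Worked example (hypothetical layout): with an active scan of two 16-bit
 * channels at scan indices 0 and 1, and a buffer that only wants index 1,
 * the table built by iio_buffer_update_demux() below holds a single entry
 *
 *	{ .from = 2, .to = 0, .length = 2 }
 *
 * so iio_demux() copies bytes 2..3 of the raw scan to bytes 0..1 of
 * demux_bounce and hands the bounce buffer to the consumer.
 */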
static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}
static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
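
/*
 * Illustrative sketch (hypothetical driver code): this is typically called
 * from a trigger handler once a complete scan has been assembled, e.g.
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u8 scan[16];				// sized per scan_bytes
 *
 *		my_fill_scan(indio_dev, scan);		// hypothetical helper
 *		iio_push_to_buffers(indio_dev, scan);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */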
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);
/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do
 * not call this function manually, always use iio_buffer_put() when done
 * using a buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}
/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);
/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
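
/*
 * Illustrative sketch: the get/put pair implements standard kref-style
 * lifetime management, as used by iio_buffer_activate()/_deactivate() above.
 * A hypothetical consumer holding on to a buffer beyond the scope in which
 * it obtained the pointer would do:
 *
 *	my_state->buffer = iio_buffer_get(buffer);	// take a reference
 *	...
 *	iio_buffer_put(my_state->buffer);		// drop it when done;
 *							// the last put frees it
 *							// via iio_buffer_release()
 */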