/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static bool iio_buffer_data_available(struct iio_buffer *buf)
{
	if (buf->access->data_available)
		return buf->access->data_available(buf);

	return buf->stufftoread;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

	do {
		if (!iio_buffer_data_available(rb)) {
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;

			ret = wait_event_interruptible(rb->pollq,
					iio_buffer_data_available(rb) ||
					indio_dev->info == NULL);
			if (ret)
				return ret;
			if (indio_dev->info == NULL)
				return -ENODEV;
		}

		ret = rb->access->read_first_n(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);

	return ret;
}

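/*
 * Userspace view (illustrative sketch, not part of this file): once a
 * buffer is enabled, a blocking read of scan-sized records looks like:
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	char data[64];
 *	ssize_t n = read(fd, data, sizeof(data));
 *
 * With O_NONBLOCK the read instead fails with EAGAIN when no data is
 * queued, matching the checks above.
 */
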
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_data_available(rb))
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);

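/*
 * Example (illustrative only, not from this file): buffer implementations
 * are expected to embed struct iio_buffer as their *first* member so the
 * core can treat the two interchangeably. A hypothetical implementation
 * might look like:
 *
 *	struct my_ring_buffer {
 *		struct iio_buffer buffer;	must be first
 *		u8 *data;
 *		unsigned int head, tail;
 *	};
 *
 *	static struct my_ring_buffer *to_my_ring(struct iio_buffer *b)
 *	{
 *		return container_of(b, struct my_ring_buffer, buffer);
 *	}
 *
 * The implementation calls iio_buffer_init(&ring->buffer) before handing
 * the buffer to the core.
 */
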
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.repeat,
		       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

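/*
 * For reference (not in the original source): with a scan_type of
 * { .sign = 's', .realbits = 12, .storagebits = 16, .shift = 4,
 *   .endianness = IIO_LE }, the attribute above reads back as
 * "le:s12/16>>4"; with .repeat = 2 it would read "le:s12/16X2>>4".
 */
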
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     IIO_SEPARATE,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     IIO_SEPARATE,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	return attrcount;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;
	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

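/*
 * Typical call sequence (illustrative sketch, not part of this file): a
 * driver attaches a buffer and then registers its channel array so the
 * scan_elements sysfs group is created. Error handling is omitted and
 * "my_channels" is a hypothetical struct iio_chan_spec array:
 *
 *	indio_dev->buffer = iio_kfifo_allocate(indio_dev);
 *	ret = iio_buffer_register(indio_dev, my_channels,
 *				  ARRAY_SIZE(my_channels));
 */
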
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

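/*
 * For reference (not in the original source): available_scan_masks is laid
 * out as consecutive bitmaps of masklength bits, terminated by an empty
 * mask. E.g. a 3-channel device allowing "all channels" or "channel 0 only"
 * might declare (hypothetical name):
 *
 *	static const unsigned long my_scan_masks[] = { 0x7, 0x1, 0 };
 *
 * iio_scan_mask_match() walks this array and returns the first entry of
 * which the requested mask is a subset.
 */
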
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

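/*
 * Worked example (illustrative only): two 16-bit channels plus a 64-bit
 * timestamp. Channel 0: bytes = ALIGN(0, 2) + 2 = 2. Channel 1:
 * bytes = ALIGN(2, 2) + 2 = 4. Timestamp: bytes = ALIGN(4, 8) + 8 = 16,
 * i.e. 4 bytes of padding are inserted so the s64 timestamp stays
 * naturally aligned within each scan.
 */
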
static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	iio_buffer_put(buffer);
}

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				return ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				return ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
		    iio_scan_mask_match(indio_dev->available_scan_masks,
					indio_dev->masklength,
					compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				return -EINVAL;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO
			       "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
	return ret;
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

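/*
 * Illustrative call (not from this file): an in-kernel consumer that wants
 * to start capturing into a buffer it attached earlier would call
 *
 *	ret = iio_update_buffers(indio_dev, my_buffer, NULL);
 *
 * and later tear it down with iio_update_buffers(indio_dev, NULL,
 * my_buffer). "my_buffer" is a hypothetical struct iio_buffer pointer.
 */
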
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					   indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					   NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

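/*
 * Illustrative use (not from this file): a driver whose ADC can sample only
 * one channel at a time plugs this helper into its buffer setup ops
 * ("my_setup_ops" is a hypothetical name):
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 *
 * The core then rejects any scan mask with more than one bit set.
 */
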
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

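/*
 * Example (illustrative only): with an active scan of channels {0,1,2}
 * (16-bit each) and a buffer that requested only {0,2}, the demux list
 * would hold two entries, roughly { .from = 0, .to = 0, .length = 2 } and
 * { .from = 4, .to = 2, .length = 2 }, compacting each captured scan into
 * the bounce buffer before it is pushed to that buffer.
 */
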
static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);

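/*
 * Typical producer (illustrative sketch, not part of this file): a trigger
 * handler fills one scan worth of data (indio_dev->scan_bytes) and pushes
 * it to every attached buffer. my_fill_scan() is a hypothetical helper:
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u8 scan[32];
 *
 *		my_fill_scan(indio_dev, scan);
 *		iio_push_to_buffers(indio_dev, scan);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */
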
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			if (ch->scan_type.repeat > 1)
				length = ch->scan_type.storagebits / 8 *
					ch->scan_type.repeat;
			else
				length = ch->scan_type.storagebits / 8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do
 * not call this function manually, always use iio_buffer_put() when done
 * using a buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
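
/*
 * Illustrative lifetime pattern (not from this file): code that stashes a
 * buffer pointer takes a reference first and drops it when done:
 *
 *	struct iio_buffer *b = iio_buffer_get(indio_dev->buffer);
 *	...
 *	iio_buffer_put(b);
 *
 * Both helpers accept NULL, so callers need not check the pointer first.
 */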