drivers/iio/buffer_cb.c
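/*
 * Generic callback buffer for in-kernel IIO consumers: samples pushed into
 * this buffer by a producer device are handed to a consumer-supplied
 * callback rather than being exposed to userspace.
 */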
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/iio/buffer.h>
#include <linux/iio/consumer.h>

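/*
 * Per-consumer state: the embedded iio_buffer that gets attached to the
 * producer device, the consumer callback with its context pointer, and the
 * array of channels obtained from the producer.
 */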
struct iio_cb_buffer {
        struct iio_buffer buffer;
        int (*cb)(const void *data, void *private);
        void *private;
        struct iio_channel *channels;
};

static struct iio_cb_buffer *buffer_to_cb_buffer(struct iio_buffer *buffer)
{
        return container_of(buffer, struct iio_cb_buffer, buffer);
}

static int iio_buffer_cb_store_to(struct iio_buffer *buffer, const void *data)
{
        struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);

        return cb_buff->cb(data, cb_buff->private);
}

static void iio_buffer_cb_release(struct iio_buffer *buffer)
{
        struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);

        kfree(cb_buff->buffer.scan_mask);
        kfree(cb_buff);
}

static const struct iio_buffer_access_funcs iio_cb_access = {
        .store_to = &iio_buffer_cb_store_to,
        .release = &iio_buffer_cb_release,

        .modes = INDIO_BUFFER_SOFTWARE | INDIO_BUFFER_TRIGGERED,
};

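/*
 * iio_channel_get_all_cb() - allocate a callback buffer covering all IIO
 * channels mapped to @dev
 * @dev:     consumer device the channel maps were registered for
 * @cb:      callback run for each datum pushed to the buffer
 * @private: context pointer passed back to @cb
 *
 * All channels must belong to the same producer device; the buffer's scan
 * mask is built from their scan indices. Returns the new callback buffer,
 * or an ERR_PTR() on failure.
 */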
struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
                                             int (*cb)(const void *data,
                                                       void *private),
                                             void *private)
{
        int ret;
        struct iio_cb_buffer *cb_buff;
        struct iio_dev *indio_dev;
        struct iio_channel *chan;

        cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
        if (cb_buff == NULL)
                return ERR_PTR(-ENOMEM);

        iio_buffer_init(&cb_buff->buffer);

        cb_buff->private = private;
        cb_buff->cb = cb;
        cb_buff->buffer.access = &iio_cb_access;
        INIT_LIST_HEAD(&cb_buff->buffer.demux_list);

        cb_buff->channels = iio_channel_get_all(dev);
        if (IS_ERR(cb_buff->channels)) {
                ret = PTR_ERR(cb_buff->channels);
                goto error_free_cb_buff;
        }

        indio_dev = cb_buff->channels[0].indio_dev;
        cb_buff->buffer.scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
                                            sizeof(long), GFP_KERNEL);
        if (cb_buff->buffer.scan_mask == NULL) {
                ret = -ENOMEM;
                goto error_release_channels;
        }
        chan = &cb_buff->channels[0];
        while (chan->indio_dev) {
                if (chan->indio_dev != indio_dev) {
                        ret = -EINVAL;
                        goto error_free_scan_mask;
                }
                set_bit(chan->channel->scan_index,
                        cb_buff->buffer.scan_mask);
                chan++;
        }

        return cb_buff;

error_free_scan_mask:
        kfree(cb_buff->buffer.scan_mask);
error_release_channels:
        iio_channel_release_all(cb_buff->channels);
error_free_cb_buff:
        kfree(cb_buff);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);

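/*
 * iio_channel_start_all_cb() - attach the callback buffer to the producer
 * device so that captured data starts flowing to the consumer callback.
 */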
int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
{
        return iio_update_buffers(cb_buff->channels[0].indio_dev,
                                  &cb_buff->buffer, NULL);
}
EXPORT_SYMBOL_GPL(iio_channel_start_all_cb);

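/*
 * iio_channel_stop_all_cb() - detach the callback buffer from the producer
 * device, stopping delivery to the consumer callback.
 */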
void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff)
{
        iio_update_buffers(cb_buff->channels[0].indio_dev,
                           NULL, &cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);

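/*
 * iio_channel_release_all_cb() - release the channels and drop the buffer
 * reference; once the last reference is gone, iio_buffer_cb_release()
 * frees the scan mask and the structure itself.
 */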
void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
{
        iio_channel_release_all(cb_buff->channels);
        iio_buffer_put(&cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all_cb);

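/*
 * iio_channel_cb_get_channels() - return the channel array obtained by
 * iio_channel_get_all_cb(), so callers can inspect the channels backing
 * the buffer.
 */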
struct iio_channel
*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer)
{
        return cb_buffer->channels;
}
EXPORT_SYMBOL_GPL(iio_channel_cb_get_channels);
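
/*
 * Illustrative usage sketch (not part of the original file; the names
 * my_consumer_cb, my_state and st below are hypothetical): a consumer
 * driver might drive this API roughly as follows.
 *
 *	static int my_consumer_cb(const void *data, void *private)
 *	{
 *		struct my_state *st = private;
 *
 *		...decode one pushed scan from *data using st->channels...
 *		return 0;
 *	}
 *
 *	st->cb_buff = iio_channel_get_all_cb(dev, my_consumer_cb, st);
 *	if (IS_ERR(st->cb_buff))
 *		return PTR_ERR(st->cb_buff);
 *	st->channels = iio_channel_cb_get_channels(st->cb_buff);
 *
 *	ret = iio_channel_start_all_cb(st->cb_buff);
 *	...
 *	iio_channel_stop_all_cb(st->cb_buff);
 *	iio_channel_release_all_cb(st->cb_buff);
 */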