/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

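/**
 * iio_map_array_register() - register consumer mappings for a provider
 * @indio_dev:	the providing IIO device
 * @maps:	array of mappings, terminated by an entry whose
 *		consumer_dev_name is NULL
 *
 * Adds each map to the global iio_map_list so in-kernel consumers can
 * later look channels up by consumer device and channel name.  Returns 0
 * on success or -ENOMEM if an internal list entry cannot be allocated.
 */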
int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (mapi == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);


/* Assumes the exact same array (i.e. the same memory locations) is
 * passed at unregistration as was passed at registration, rather than
 * doing more complex checking of contents.
 */
int iio_map_array_unregister(struct iio_dev *indio_dev,
			     struct iio_map *maps)
{
	int i = 0, ret = 0;
	bool found_it;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		found_it = false;
		list_for_each_entry(mapi, &iio_map_list, l)
			if (&maps[i] == mapi->map) {
				list_del(&mapi->l);
				kfree(mapi);
				found_it = true;
				break;
			}
		if (!found_it) {
			ret = -ENODEV;
			goto error_ret;
		}
		i++;
	}
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

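/* Look up a channel of @indio_dev by its datasheet_name; NULL if not found. */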
static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}


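/**
 * iio_channel_get() - get an IIO channel for an in-kernel consumer
 * @name:		consumer device name, matched against
 *			iio_map.consumer_dev_name
 * @channel_name:	consumer channel name, matched against
 *			iio_map.consumer_channel
 *
 * Searches the registered channel maps, takes a reference on the
 * providing IIO device and returns a newly allocated struct iio_channel.
 * The result must be freed with iio_channel_release().  Returns an
 * ERR_PTR() on failure: -ENODEV if no map matches, -ENOMEM or -EINVAL
 * on allocation or channel-lookup problems.
 *
 * Illustrative use by a consumer driver (names are placeholders, error
 * handling trimmed):
 *
 *	struct iio_channel *chan;
 *	int raw;
 *
 *	chan = iio_channel_get("some-consumer", "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	iio_read_channel_raw(chan, &raw);
 *	iio_channel_release(chan);
 */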
struct iio_channel *iio_channel_get(const char *name, const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find the matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(iio_channel_get);

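/**
 * iio_channel_release() - release a channel obtained via iio_channel_get()
 * @channel:	channel to free
 *
 * Drops the reference on the underlying IIO device and frees the channel.
 */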
void iio_channel_release(struct iio_channel *channel)
{
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

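/**
 * iio_channel_get_all() - get all channels mapped to a consumer device
 * @name:	consumer device name
 *
 * Returns a NULL-terminated array of struct iio_channel, one entry per
 * matching map, with a reference held on each providing device.  Release
 * with iio_channel_release_all().  Returns an ERR_PTR() on failure.
 */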
struct iio_channel *iio_channel_get_all(const char *name)
{
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (name == NULL)
		return ERR_PTR(-EINVAL);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kzalloc(sizeof(*chans)*(nummaps + 1), GFP_KERNEL);
	if (chans == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (chans[mapind].channel == NULL) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	for (i = 0; i < nummaps; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);

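/**
 * iio_channel_release_all() - release channels obtained via
 *			       iio_channel_get_all()
 * @channels:	NULL-terminated array of channels to free
 */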
void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

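/*
 * Core read helper: forwards to the provider's read_raw() callback.  A
 * scratch variable is substituted when the caller does not care about the
 * second value, so drivers can always dereference val2.
 */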
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
	enum iio_chan_info_enum info)
{
	int unused;

	if (val2 == NULL)
		val2 = &unused;

	return chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
					       val, val2, info);
}

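/**
 * iio_read_channel_raw() - read the raw value from a channel
 * @chan:	channel being queried
 * @val:	value read back
 *
 * The info_exist_lock is held across the read so it cannot race with the
 * provider driver going away; -ENODEV is returned if it already has.
 */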
int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

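/*
 * Convert a raw reading to a processed value: add the channel's offset
 * (when one is reported) and apply its scale according to the IIO_VAL_*
 * encoding returned for IIO_CHAN_INFO_SCALE.  Must be called with
 * info_exist_lock held.
 */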
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
	int raw, int *processed, unsigned int scale)
{
	int scale_type, scale_val, scale_val2, offset;
	s64 raw64 = raw;
	int ret;

	ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
	if (ret == 0)
		raw64 += offset;

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
					IIO_CHAN_INFO_SCALE);
	if (scale_type < 0)
		return scale_type;

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

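/**
 * iio_convert_raw_to_processed() - convert a raw value to a processed one
 * @chan:	channel being queried
 * @raw:	raw value returned by the provider
 * @processed:	result of the conversion
 * @scale:	scale factor to apply during the conversion
 *
 * Locked wrapper around iio_convert_raw_to_processed_unlocked(); returns
 * -ENODEV if the provider driver has gone away.
 */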
int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
	int *processed, unsigned int scale)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						    scale);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);

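/**
 * iio_read_channel_processed() - read a processed value from a channel
 * @chan:	channel being queried
 * @val:	value read back
 *
 * If the provider reports IIO_CHAN_INFO_PROCESSED the value is read
 * directly; otherwise a raw read is performed and converted using the
 * channel's offset and scale.
 */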
int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val, 1);
	}

err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);

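/**
 * iio_read_channel_scale() - read the scale of a channel
 * @chan:	channel being queried
 * @val:	first part of the returned scale
 * @val2:	second part of the returned scale
 *
 * Returns the IIO_VAL_* type describing how @val and @val2 encode the
 * scale, or a negative error code.
 */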
int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, IIO_CHAN_INFO_SCALE);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

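/**
 * iio_get_channel_type() - get the type of a channel
 * @chan:	channel being queried
 * @type:	returned iio_chan_type
 *
 * Returns 0 on success or -ENODEV if the provider driver has gone away.
 */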
int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	int ret = 0;
	/* Need to verify underlying driver has not gone away */

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);