linux.git: drivers/iio/inkern.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* The industrial I/O core in-kernel channel mapping
3  *
4  * Copyright (c) 2011 Jonathan Cameron
5  */
6 #include <linux/cleanup.h>
7 #include <linux/err.h>
8 #include <linux/export.h>
9 #include <linux/minmax.h>
10 #include <linux/mutex.h>
11 #include <linux/property.h>
12 #include <linux/slab.h>
13
14 #include <linux/iio/iio.h>
15 #include <linux/iio/iio-opaque.h>
16 #include "iio_core.h"
17 #include <linux/iio/machine.h>
18 #include <linux/iio/driver.h>
19 #include <linux/iio/consumer.h>
20
21 struct iio_map_internal {
22         struct iio_dev *indio_dev;
23         struct iio_map *map;
24         struct list_head l;
25 };
26
27 static LIST_HEAD(iio_map_list);
28 static DEFINE_MUTEX(iio_map_list_lock);
29
30 static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
31 {
32         int ret = -ENODEV;
33         struct iio_map_internal *mapi, *next;
34
35         list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
36                 if (indio_dev == mapi->indio_dev) {
37                         list_del(&mapi->l);
38                         kfree(mapi);
39                         ret = 0;
40                 }
41         }
42         return ret;
43 }
44
45 int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
46 {
47         struct iio_map_internal *mapi;
48         int i = 0;
49         int ret;
50
51         if (!maps)
52                 return 0;
53
54         guard(mutex)(&iio_map_list_lock);
55         while (maps[i].consumer_dev_name) {
56                 mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
57                 if (!mapi) {
58                         ret = -ENOMEM;
59                         goto error_ret;
60                 }
61                 mapi->map = &maps[i];
62                 mapi->indio_dev = indio_dev;
63                 list_add_tail(&mapi->l, &iio_map_list);
64                 i++;
65         }
66
67         return 0;
68 error_ret:
69         iio_map_array_unregister_locked(indio_dev);
70         return ret;
71 }
72 EXPORT_SYMBOL_GPL(iio_map_array_register);
73
74 /*
75  * Remove all map entries associated with the given iio device
76  */
77 int iio_map_array_unregister(struct iio_dev *indio_dev)
78 {
79         guard(mutex)(&iio_map_list_lock);
80         return iio_map_array_unregister_locked(indio_dev);
81 }
82 EXPORT_SYMBOL_GPL(iio_map_array_unregister);
83
84 static void iio_map_array_unregister_cb(void *indio_dev)
85 {
86         iio_map_array_unregister(indio_dev);
87 }
88
89 int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev, struct iio_map *maps)
90 {
91         int ret;
92
93         ret = iio_map_array_register(indio_dev, maps);
94         if (ret)
95                 return ret;
96
97         return devm_add_action_or_reset(dev, iio_map_array_unregister_cb, indio_dev);
98 }
99 EXPORT_SYMBOL_GPL(devm_iio_map_array_register);
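/*
 * Editorial example (not part of the upstream file): a provider driver can
 * publish static consumer mappings as in the sketch below. All names
 * ("example_adc_maps", "some-consumer", "vbat", "adc_chan0") are
 * hypothetical.
 *
 *	static struct iio_map example_adc_maps[] = {
 *		{
 *			.adc_channel_label = "adc_chan0",
 *			.consumer_dev_name = "some-consumer",
 *			.consumer_channel = "vbat",
 *		},
 *		{ }	// terminating entry: consumer_dev_name must be NULL
 *	};
 *
 *	// in the provider's probe(), typically next to iio_device_register():
 *	ret = devm_iio_map_array_register(dev, indio_dev, example_adc_maps);
 *	if (ret)
 *		return ret;
 */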
100
101 static const struct iio_chan_spec
102 *iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
103 {
104         int i;
105         const struct iio_chan_spec *chan = NULL;
106
107         for (i = 0; i < indio_dev->num_channels; i++)
108                 if (indio_dev->channels[i].datasheet_name &&
109                     strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
110                         chan = &indio_dev->channels[i];
111                         break;
112                 }
113         return chan;
114 }
115
116 /**
117  * __fwnode_iio_simple_xlate - translate iiospec to the IIO channel index
118  * @indio_dev:  pointer to the iio_dev structure
119  * @iiospec:    IIO specifier as found in the device tree
120  *
121  * This is a simple translation function, suitable for the most common
122  * 1:1 mapped channels in IIO chips. This function performs only one
123  * sanity check: whether the IIO index is less than num_channels (as
124  * specified in the iio_dev).
125  */
126 static int __fwnode_iio_simple_xlate(struct iio_dev *indio_dev,
127                                      const struct fwnode_reference_args *iiospec)
128 {
129         if (!iiospec->nargs)
130                 return 0;
131
132         if (iiospec->args[0] >= indio_dev->num_channels) {
133                 dev_err(&indio_dev->dev, "invalid channel index %llu\n",
134                         iiospec->args[0]);
135                 return -EINVAL;
136         }
137
138         return iiospec->args[0];
139 }
140
141 static int __fwnode_iio_channel_get(struct iio_channel *channel,
142                                     struct fwnode_handle *fwnode, int index)
143 {
144         struct fwnode_reference_args iiospec;
145         struct device *idev;
146         struct iio_dev *indio_dev;
147         int err;
148
149         err = fwnode_property_get_reference_args(fwnode, "io-channels",
150                                                  "#io-channel-cells", 0,
151                                                  index, &iiospec);
152         if (err)
153                 return err;
154
155         idev = bus_find_device_by_fwnode(&iio_bus_type, iiospec.fwnode);
156         if (!idev) {
157                 fwnode_handle_put(iiospec.fwnode);
158                 return -EPROBE_DEFER;
159         }
160
161         indio_dev = dev_to_iio_dev(idev);
162         channel->indio_dev = indio_dev;
163         if (indio_dev->info->fwnode_xlate)
164                 index = indio_dev->info->fwnode_xlate(indio_dev, &iiospec);
165         else
166                 index = __fwnode_iio_simple_xlate(indio_dev, &iiospec);
167         fwnode_handle_put(iiospec.fwnode);
168         if (index < 0)
169                 goto err_put;
170         channel->channel = &indio_dev->channels[index];
171
172         return 0;
173
174 err_put:
175         iio_device_put(indio_dev);
176         return index;
177 }
178
179 static struct iio_channel *fwnode_iio_channel_get(struct fwnode_handle *fwnode,
180                                                   int index)
181 {
182         int err;
183
184         if (index < 0)
185                 return ERR_PTR(-EINVAL);
186
187         struct iio_channel *channel __free(kfree) =
188                 kzalloc(sizeof(*channel), GFP_KERNEL);
189         if (!channel)
190                 return ERR_PTR(-ENOMEM);
191
192         err = __fwnode_iio_channel_get(channel, fwnode, index);
193         if (err)
194                 return ERR_PTR(err);
195
196         return_ptr(channel);
197 }
198
199 static struct iio_channel *
200 __fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode, const char *name)
201 {
202         struct iio_channel *chan;
203         int index = 0;
204
205         /*
206          * For named iio channels, first look up the name in the
207          * "io-channel-names" property.  If it cannot be found, the
208          * index will be an error code, and fwnode_iio_channel_get()
209          * will fail.
210          */
211         if (name)
212                 index = fwnode_property_match_string(fwnode, "io-channel-names",
213                                                      name);
214
215         chan = fwnode_iio_channel_get(fwnode, index);
216         if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
217                 return chan;
218         if (name) {
219                 if (index >= 0) {
220                         pr_err("ERROR: could not get IIO channel %pfw:%s(%i)\n",
221                                fwnode, name, index);
222                          * In this case, we found 'name' in 'io-channel-names'
223                          * but the lookup still failed, so we should not proceed
224                          * with any other lookup. Hence, explicitly return -EINVAL
225                          * (maybe not the best error code) so that the caller
226                          * won't do a system lookup.
227                          * won't do a system lookup.
228                          */
229                         return ERR_PTR(-EINVAL);
230                 }
231                 /*
232                  * If index < 0, then fwnode_property_get_reference_args() fails
233                  * with -EINVAL or -ENOENT (ACPI case) which is expected. We
234                  * should not proceed if we get any other error.
235                  */
236                 if (PTR_ERR(chan) != -EINVAL && PTR_ERR(chan) != -ENOENT)
237                         return chan;
238         } else if (PTR_ERR(chan) != -ENOENT) {
239                 /*
240                  * If !name, then we should only proceed with the lookup if
241                  * fwnode_property_get_reference_args() returns -ENOENT.
242                  */
243                 return chan;
244         }
245
246         /* so we continue the lookup */
247         return ERR_PTR(-ENODEV);
248 }
249
250 struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
251                                                    const char *name)
252 {
253         struct fwnode_handle *parent;
254         struct iio_channel *chan;
255
256         /* Walk up the tree of devices looking for a matching iio channel */
257         chan = __fwnode_iio_channel_get_by_name(fwnode, name);
258         if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV)
259                 return chan;
260
261         /*
262          * No matching IIO channel found on this node.
263          * If the parent node has an "io-channel-ranges" property,
264          * then we can try one of its channels.
265          */
266         fwnode_for_each_parent_node(fwnode, parent) {
267                 if (!fwnode_property_present(parent, "io-channel-ranges")) {
268                         fwnode_handle_put(parent);
269                         return ERR_PTR(-ENODEV);
270                 }
271
272                 chan = __fwnode_iio_channel_get_by_name(parent, name);
273                 if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV) {
274                         fwnode_handle_put(parent);
275                         return chan;
276                 }
277         }
278
279         return ERR_PTR(-ENODEV);
280 }
281 EXPORT_SYMBOL_GPL(fwnode_iio_channel_get_by_name);
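/*
 * Editorial note on the firmware description this lookup consumes; the node
 * names and values below are made up:
 *
 *	adc: adc@48 {
 *		#io-channel-cells = <1>;
 *	};
 *
 *	consumer {
 *		io-channels = <&adc 0>, <&adc 3>;
 *		io-channel-names = "vdd", "temp";
 *	};
 *
 * fwnode_iio_channel_get_by_name(fwnode, "temp") matches "temp" against
 * "io-channel-names" (index 1) and then resolves the second "io-channels"
 * reference. If nothing matches on the node itself, parent nodes are tried
 * as long as they carry an "io-channel-ranges" property.
 */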
282
283 static struct iio_channel *fwnode_iio_channel_get_all(struct device *dev)
284 {
285         struct fwnode_handle *fwnode = dev_fwnode(dev);
286         int i, mapind, nummaps = 0;
287         int ret;
288
289         do {
290                 ret = fwnode_property_get_reference_args(fwnode, "io-channels",
291                                                          "#io-channel-cells", 0,
292                                                          nummaps, NULL);
293                 if (ret < 0)
294                         break;
295         } while (++nummaps);
296
297         if (nummaps == 0)
298                 return ERR_PTR(-ENODEV);
299
300         /* NULL terminated array to save passing size */
301         struct iio_channel *chans __free(kfree) =
302                 kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
303         if (!chans)
304                 return ERR_PTR(-ENOMEM);
305
306         /* Search for FW matches */
307         for (mapind = 0; mapind < nummaps; mapind++) {
308                 ret = __fwnode_iio_channel_get(&chans[mapind], fwnode, mapind);
309                 if (ret)
310                         goto error_free_chans;
311         }
312         return_ptr(chans);
313
314 error_free_chans:
315         for (i = 0; i < mapind; i++)
316                 iio_device_put(chans[i].indio_dev);
317         return ERR_PTR(ret);
318 }
319
320 static struct iio_channel *iio_channel_get_sys(const char *name,
321                                                const char *channel_name)
322 {
323         struct iio_map_internal *c_i = NULL, *c = NULL;
324         int err;
325
326         if (!(name || channel_name))
327                 return ERR_PTR(-ENODEV);
328
329         /* first find a matching entry in the channel map */
330         scoped_guard(mutex, &iio_map_list_lock) {
331                 list_for_each_entry(c_i, &iio_map_list, l) {
332                         if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
333                             (channel_name &&
334                              strcmp(channel_name, c_i->map->consumer_channel) != 0))
335                                 continue;
336                         c = c_i;
337                         iio_device_get(c->indio_dev);
338                         break;
339                 }
340         }
341         if (!c)
342                 return ERR_PTR(-ENODEV);
343
344         struct iio_channel *channel __free(kfree) =
345                 kzalloc(sizeof(*channel), GFP_KERNEL);
346         if (!channel) {
347                 err = -ENOMEM;
348                 goto error_no_mem;
349         }
350
351         channel->indio_dev = c->indio_dev;
352
353         if (c->map->adc_channel_label) {
354                 channel->channel =
355                         iio_chan_spec_from_name(channel->indio_dev,
356                                                 c->map->adc_channel_label);
357
358                 if (!channel->channel) {
359                         err = -EINVAL;
360                         goto error_no_mem;
361                 }
362         }
363
364         return_ptr(channel);
365
366 error_no_mem:
367         iio_device_put(c->indio_dev);
368         return ERR_PTR(err);
369 }
370
371 struct iio_channel *iio_channel_get(struct device *dev,
372                                     const char *channel_name)
373 {
374         const char *name = dev ? dev_name(dev) : NULL;
375         struct iio_channel *channel;
376
377         if (dev) {
378                 channel = fwnode_iio_channel_get_by_name(dev_fwnode(dev),
379                                                          channel_name);
380                 if (!IS_ERR(channel) || PTR_ERR(channel) != -ENODEV)
381                         return channel;
382         }
383
384         return iio_channel_get_sys(name, channel_name);
385 }
386 EXPORT_SYMBOL_GPL(iio_channel_get);
387
388 void iio_channel_release(struct iio_channel *channel)
389 {
390         if (!channel)
391                 return;
392         iio_device_put(channel->indio_dev);
393         kfree(channel);
394 }
395 EXPORT_SYMBOL_GPL(iio_channel_release);
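/*
 * Minimal consumer sketch (editorial; the channel name "vbat" and the error
 * handling are illustrative only):
 *
 *	struct iio_channel *chan;
 *	int raw, ret;
 *
 *	chan = iio_channel_get(dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	ret = iio_read_channel_raw(chan, &raw);
 *	...
 *	iio_channel_release(chan);
 */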
396
397 static void devm_iio_channel_free(void *iio_channel)
398 {
399         iio_channel_release(iio_channel);
400 }
401
402 struct iio_channel *devm_iio_channel_get(struct device *dev,
403                                          const char *channel_name)
404 {
405         struct iio_channel *channel;
406         int ret;
407
408         channel = iio_channel_get(dev, channel_name);
409         if (IS_ERR(channel))
410                 return channel;
411
412         ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
413         if (ret)
414                 return ERR_PTR(ret);
415
416         return channel;
417 }
418 EXPORT_SYMBOL_GPL(devm_iio_channel_get);
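/*
 * Editorial note: with the devm variant the release is bound to the consumer
 * device, so the explicit iio_channel_release() from the sketch above is not
 * needed:
 *
 *	chan = devm_iio_channel_get(dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */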
419
420 struct iio_channel *devm_fwnode_iio_channel_get_by_name(struct device *dev,
421                                                         struct fwnode_handle *fwnode,
422                                                         const char *channel_name)
423 {
424         struct iio_channel *channel;
425         int ret;
426
427         channel = fwnode_iio_channel_get_by_name(fwnode, channel_name);
428         if (IS_ERR(channel))
429                 return channel;
430
431         ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
432         if (ret)
433                 return ERR_PTR(ret);
434
435         return channel;
436 }
437 EXPORT_SYMBOL_GPL(devm_fwnode_iio_channel_get_by_name);
438
439 struct iio_channel *iio_channel_get_all(struct device *dev)
440 {
441         const char *name;
442         struct iio_map_internal *c = NULL;
443         struct iio_channel *fw_chans;
444         int nummaps = 0;
445         int mapind = 0;
446         int i, ret;
447
448         if (!dev)
449                 return ERR_PTR(-EINVAL);
450
451         fw_chans = fwnode_iio_channel_get_all(dev);
452         /*
453          * We only want to carry on if the error is -ENODEV.  Anything else
454          * should be reported up the stack.
455          */
456         if (!IS_ERR(fw_chans) || PTR_ERR(fw_chans) != -ENODEV)
457                 return fw_chans;
458
459         name = dev_name(dev);
460
461         guard(mutex)(&iio_map_list_lock);
462         /* first count the matching maps */
463         list_for_each_entry(c, &iio_map_list, l)
464                 if (name && strcmp(name, c->map->consumer_dev_name) != 0)
465                         continue;
466                 else
467                         nummaps++;
468
469         if (nummaps == 0)
470                 return ERR_PTR(-ENODEV);
471
472         /* NULL terminated array to save passing size */
473         struct iio_channel *chans __free(kfree) =
474                 kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
475         if (!chans)
476                 return ERR_PTR(-ENOMEM);
477
478         /* for each map fill in the chans element */
479         list_for_each_entry(c, &iio_map_list, l) {
480                 if (name && strcmp(name, c->map->consumer_dev_name) != 0)
481                         continue;
482                 chans[mapind].indio_dev = c->indio_dev;
483                 chans[mapind].data = c->map->consumer_data;
484                 chans[mapind].channel =
485                         iio_chan_spec_from_name(chans[mapind].indio_dev,
486                                                 c->map->adc_channel_label);
487                 if (!chans[mapind].channel) {
488                         ret = -EINVAL;
489                         goto error_free_chans;
490                 }
491                 iio_device_get(chans[mapind].indio_dev);
492                 mapind++;
493         }
494         if (mapind == 0) {
495                 ret = -ENODEV;
496                 goto error_free_chans;
497         }
498
499         return_ptr(chans);
500
501 error_free_chans:
502         for (i = 0; i < nummaps; i++)
503                 iio_device_put(chans[i].indio_dev);
504         return ERR_PTR(ret);
505 }
506 EXPORT_SYMBOL_GPL(iio_channel_get_all);
507
508 void iio_channel_release_all(struct iio_channel *channels)
509 {
510         struct iio_channel *chan = &channels[0];
511
512         while (chan->indio_dev) {
513                 iio_device_put(chan->indio_dev);
514                 chan++;
515         }
516         kfree(channels);
517 }
518 EXPORT_SYMBOL_GPL(iio_channel_release_all);
519
520 static void devm_iio_channel_free_all(void *iio_channels)
521 {
522         iio_channel_release_all(iio_channels);
523 }
524
525 struct iio_channel *devm_iio_channel_get_all(struct device *dev)
526 {
527         struct iio_channel *channels;
528         int ret;
529
530         channels = iio_channel_get_all(dev);
531         if (IS_ERR(channels))
532                 return channels;
533
534         ret = devm_add_action_or_reset(dev, devm_iio_channel_free_all,
535                                        channels);
536         if (ret)
537                 return ERR_PTR(ret);
538
539         return channels;
540 }
541 EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
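/*
 * The array returned by iio_channel_get_all() and devm_iio_channel_get_all()
 * is terminated by an entry whose indio_dev is NULL, so a consumer can walk
 * it as in this editorial sketch:
 *
 *	struct iio_channel *chans, *chan;
 *
 *	chans = devm_iio_channel_get_all(dev);
 *	if (IS_ERR(chans))
 *		return PTR_ERR(chans);
 *
 *	for (chan = chans; chan->indio_dev; chan++)
 *		... use chan ...
 */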
542
543 static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
544                             enum iio_chan_info_enum info)
545 {
546         const struct iio_info *iio_info = chan->indio_dev->info;
547         int unused;
548         int vals[INDIO_MAX_RAW_ELEMENTS];
549         int ret;
550         int val_len = 2;
551
552         if (!val2)
553                 val2 = &unused;
554
555         if (!iio_channel_has_info(chan->channel, info))
556                 return -EINVAL;
557
558         if (iio_info->read_raw_multi) {
559                 ret = iio_info->read_raw_multi(chan->indio_dev,
560                                                chan->channel,
561                                                INDIO_MAX_RAW_ELEMENTS,
562                                                vals, &val_len, info);
563                 *val = vals[0];
564                 *val2 = vals[1];
565         } else if (iio_info->read_raw) {
566                 ret = iio_info->read_raw(chan->indio_dev,
567                                          chan->channel, val, val2, info);
568         } else {
569                 return -EINVAL;
570         }
571
572         return ret;
573 }
574
575 int iio_read_channel_raw(struct iio_channel *chan, int *val)
576 {
577         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
578
579         guard(mutex)(&iio_dev_opaque->info_exist_lock);
580         if (!chan->indio_dev->info)
581                 return -ENODEV;
582
583         return iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
584 }
585 EXPORT_SYMBOL_GPL(iio_read_channel_raw);
586
587 int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
588 {
589         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
590
591         guard(mutex)(&iio_dev_opaque->info_exist_lock);
592         if (!chan->indio_dev->info)
593                 return -ENODEV;
594
595         return iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
596 }
597 EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);
598
599 static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
600                                                  int raw, int *processed,
601                                                  unsigned int scale)
602 {
603         int scale_type, scale_val, scale_val2;
604         int offset_type, offset_val, offset_val2;
605         s64 raw64 = raw;
606
607         offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
608                                        IIO_CHAN_INFO_OFFSET);
609         if (offset_type >= 0) {
610                 switch (offset_type) {
611                 case IIO_VAL_INT:
612                         break;
613                 case IIO_VAL_INT_PLUS_MICRO:
614                 case IIO_VAL_INT_PLUS_NANO:
615                         /*
616                          * Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO
617                          * implicitly truncate the offset to its integer form.
618                          */
619                         break;
620                 case IIO_VAL_FRACTIONAL:
621                         offset_val /= offset_val2;
622                         break;
623                 case IIO_VAL_FRACTIONAL_LOG2:
624                         offset_val >>= offset_val2;
625                         break;
626                 default:
627                         return -EINVAL;
628                 }
629
630                 raw64 += offset_val;
631         }
632
633         scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
634                                       IIO_CHAN_INFO_SCALE);
635         if (scale_type < 0) {
636                 /*
637                  * If no channel scaling is available, apply the consumer scale
638                  * to the raw value and return.
639                  */
640                 *processed = raw * scale;
641                 return 0;
642         }
643
644         switch (scale_type) {
645         case IIO_VAL_INT:
646                 *processed = raw64 * scale_val * scale;
647                 break;
648         case IIO_VAL_INT_PLUS_MICRO:
649                 if (scale_val2 < 0)
650                         *processed = -raw64 * scale_val;
651                 else
652                         *processed = raw64 * scale_val;
653                 *processed += div_s64(raw64 * (s64)scale_val2 * scale,
654                                       1000000LL);
655                 break;
656         case IIO_VAL_INT_PLUS_NANO:
657                 if (scale_val2 < 0)
658                         *processed = -raw64 * scale_val;
659                 else
660                         *processed = raw64 * scale_val;
661                 *processed += div_s64(raw64 * (s64)scale_val2 * scale,
662                                       1000000000LL);
663                 break;
664         case IIO_VAL_FRACTIONAL:
665                 *processed = div_s64(raw64 * (s64)scale_val * scale,
666                                      scale_val2);
667                 break;
668         case IIO_VAL_FRACTIONAL_LOG2:
669                 *processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
670                 break;
671         default:
672                 return -EINVAL;
673         }
674
675         return 0;
676 }
677
678 int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
679                                  int *processed, unsigned int scale)
680 {
681         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
682
683         guard(mutex)(&iio_dev_opaque->info_exist_lock);
684         if (!chan->indio_dev->info)
685                 return -ENODEV;
686
687         return iio_convert_raw_to_processed_unlocked(chan, raw, processed,
688                                                      scale);
689 }
690 EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
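/*
 * Worked example (editorial): with raw = 100, an IIO_VAL_INT offset of 3, an
 * IIO_VAL_FRACTIONAL scale of 2500/1024 and a consumer scale of 1, the
 * IIO_VAL_FRACTIONAL branch above yields
 *
 *	processed = ((100 + 3) * 2500 * 1) / 1024 = 251
 *
 * i.e. about 251 in the channel's scaled unit (typically millivolts for a
 * voltage channel).
 */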
691
692 int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
693                                enum iio_chan_info_enum attribute)
694 {
695         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
696
697         guard(mutex)(&iio_dev_opaque->info_exist_lock);
698         if (!chan->indio_dev->info)
699                 return -ENODEV;
700
701         return iio_channel_read(chan, val, val2, attribute);
702 }
703 EXPORT_SYMBOL_GPL(iio_read_channel_attribute);
704
705 int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
706 {
707         return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
708 }
709 EXPORT_SYMBOL_GPL(iio_read_channel_offset);
710
711 int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
712                                      unsigned int scale)
713 {
714         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
715         int ret;
716
717         guard(mutex)(&iio_dev_opaque->info_exist_lock);
718         if (!chan->indio_dev->info)
719                 return -ENODEV;
720
721         if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
722                 ret = iio_channel_read(chan, val, NULL,
723                                        IIO_CHAN_INFO_PROCESSED);
724                 if (ret < 0)
725                         return ret;
726                 *val *= scale;
727
728                 return ret;
729         } else {
730                 ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
731                 if (ret < 0)
732                         return ret;
733
734                 return iio_convert_raw_to_processed_unlocked(chan, *val, val,
735                                                              scale);
736         }
737 }
738 EXPORT_SYMBOL_GPL(iio_read_channel_processed_scale);
739
740 int iio_read_channel_processed(struct iio_channel *chan, int *val)
741 {
742         /* This is just a special case with scale factor 1 */
743         return iio_read_channel_processed_scale(chan, val, 1);
744 }
745 EXPORT_SYMBOL_GPL(iio_read_channel_processed);
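/*
 * Editorial sketch: a consumer that wants a finer unit can pass a scale
 * factor, e.g. 1000 to get microvolts when the channel's processed unit is
 * millivolts:
 *
 *	int uv, ret;
 *
 *	ret = iio_read_channel_processed_scale(chan, &uv, 1000);
 *	if (ret < 0)
 *		return ret;
 */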
746
747 int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
748 {
749         return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
750 }
751 EXPORT_SYMBOL_GPL(iio_read_channel_scale);
752
753 static int iio_channel_read_avail(struct iio_channel *chan,
754                                   const int **vals, int *type, int *length,
755                                   enum iio_chan_info_enum info)
756 {
757         const struct iio_info *iio_info = chan->indio_dev->info;
758
759         if (!iio_channel_has_available(chan->channel, info))
760                 return -EINVAL;
761
762         if (iio_info->read_avail)
763                 return iio_info->read_avail(chan->indio_dev, chan->channel,
764                                             vals, type, length, info);
765         return -EINVAL;
766 }
767
768 int iio_read_avail_channel_attribute(struct iio_channel *chan,
769                                      const int **vals, int *type, int *length,
770                                      enum iio_chan_info_enum attribute)
771 {
772         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
773
774         guard(mutex)(&iio_dev_opaque->info_exist_lock);
775         if (!chan->indio_dev->info)
776                 return -ENODEV;
777
778         return iio_channel_read_avail(chan, vals, type, length, attribute);
779 }
780 EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);
781
782 int iio_read_avail_channel_raw(struct iio_channel *chan,
783                                const int **vals, int *length)
784 {
785         int ret;
786         int type;
787
788         ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
789                                                IIO_CHAN_INFO_RAW);
790
791         if (ret >= 0 && type != IIO_VAL_INT)
792                 /* raw values are assumed to be IIO_VAL_INT */
793                 ret = -EINVAL;
794
795         return ret;
796 }
797 EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
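/*
 * Editorial sketch: the return value tells the consumer how to interpret the
 * reported values, just as iio_channel_read_max()/_min() below do:
 *
 *	const int *vals;
 *	int len, ret;
 *
 *	ret = iio_read_avail_channel_raw(chan, &vals, &len);
 *	if (ret == IIO_AVAIL_RANGE)
 *		... vals[0], vals[1], vals[2] are min, step, max ...
 *	else if (ret == IIO_AVAIL_LIST)
 *		... vals[0..len - 1] are the discrete raw values ...
 */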
798
799 static int iio_channel_read_max(struct iio_channel *chan,
800                                 int *val, int *val2, int *type,
801                                 enum iio_chan_info_enum info)
802 {
803         const int *vals;
804         int length;
805         int ret;
806
807         ret = iio_channel_read_avail(chan, &vals, type, &length, info);
808         if (ret < 0)
809                 return ret;
810
811         switch (ret) {
812         case IIO_AVAIL_RANGE:
813                 switch (*type) {
814                 case IIO_VAL_INT:
815                         *val = vals[2];
816                         break;
817                 default:
818                         *val = vals[4];
819                         if (val2)
820                                 *val2 = vals[5];
821                 }
822                 return 0;
823
824         case IIO_AVAIL_LIST:
825                 if (length <= 0)
826                         return -EINVAL;
827                 switch (*type) {
828                 case IIO_VAL_INT:
829                         *val = max_array(vals, length);
830                         break;
831                 default:
832                         /* TODO: learn about max for other iio values */
833                         return -EINVAL;
834                 }
835                 return 0;
836
837         default:
838                 return -EINVAL;
839         }
840 }
841
842 int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
843 {
844         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
845         int type;
846
847         guard(mutex)(&iio_dev_opaque->info_exist_lock);
848         if (!chan->indio_dev->info)
849                 return -ENODEV;
850
851         return iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
852 }
853 EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);
854
855 static int iio_channel_read_min(struct iio_channel *chan,
856                                 int *val, int *val2, int *type,
857                                 enum iio_chan_info_enum info)
858 {
859         const int *vals;
860         int length;
861         int ret;
862
863         ret = iio_channel_read_avail(chan, &vals, type, &length, info);
864         if (ret < 0)
865                 return ret;
866
867         switch (ret) {
868         case IIO_AVAIL_RANGE:
869                 switch (*type) {
870                 case IIO_VAL_INT:
871                         *val = vals[0];
872                         break;
873                 default:
874                         *val = vals[0];
875                         if (val2)
876                                 *val2 = vals[1];
877                 }
878                 return 0;
879
880         case IIO_AVAIL_LIST:
881                 if (length <= 0)
882                         return -EINVAL;
883                 switch (*type) {
884                 case IIO_VAL_INT:
885                         *val = min_array(vals, length);
886                         break;
887                 default:
888                         /* TODO: learn about min for other iio values */
889                         return -EINVAL;
890                 }
891                 return 0;
892
893         default:
894                 return -EINVAL;
895         }
896 }
897
898 int iio_read_min_channel_raw(struct iio_channel *chan, int *val)
899 {
900         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
901         int type;
902
903         guard(mutex)(&iio_dev_opaque->info_exist_lock);
904         if (!chan->indio_dev->info)
905                 return -ENODEV;
906
907         return iio_channel_read_min(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
908 }
909 EXPORT_SYMBOL_GPL(iio_read_min_channel_raw);
910
911 int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
912 {
913         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
914
915         guard(mutex)(&iio_dev_opaque->info_exist_lock);
916         if (!chan->indio_dev->info)
917                 return -ENODEV;
918
919         *type = chan->channel->type;
920
921         return 0;
922 }
923 EXPORT_SYMBOL_GPL(iio_get_channel_type);
924
925 static int iio_channel_write(struct iio_channel *chan, int val, int val2,
926                              enum iio_chan_info_enum info)
927 {
928         const struct iio_info *iio_info = chan->indio_dev->info;
929
930         if (iio_info->write_raw)
931                 return iio_info->write_raw(chan->indio_dev,
932                                            chan->channel, val, val2, info);
933         return -EINVAL;
934 }
935
936 int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
937                                 enum iio_chan_info_enum attribute)
938 {
939         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
940
941         guard(mutex)(&iio_dev_opaque->info_exist_lock);
942         if (!chan->indio_dev->info)
943                 return -ENODEV;
944
945         return iio_channel_write(chan, val, val2, attribute);
946 }
947 EXPORT_SYMBOL_GPL(iio_write_channel_attribute);
948
949 int iio_write_channel_raw(struct iio_channel *chan, int val)
950 {
951         return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
952 }
953 EXPORT_SYMBOL_GPL(iio_write_channel_raw);
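/*
 * Editorial sketch (hypothetical DAC consumer): write a raw output code
 * through a channel obtained with one of the getters above:
 *
 *	ret = iio_write_channel_raw(dac_chan, code);
 *	if (ret)
 *		dev_err(dev, "failed to write raw value %d\n", code);
 */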
954
955 unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
956 {
957         const struct iio_chan_spec_ext_info *ext_info;
958         unsigned int i = 0;
959
960         if (!chan->channel->ext_info)
961                 return i;
962
963         for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
964                 ++i;
965
966         return i;
967 }
968 EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);
969
970 static const struct iio_chan_spec_ext_info *
971 iio_lookup_ext_info(const struct iio_channel *chan, const char *attr)
972 {
973         const struct iio_chan_spec_ext_info *ext_info;
974
975         if (!chan->channel->ext_info)
976                 return NULL;
977
978         for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
979                 if (!strcmp(attr, ext_info->name))
980                         return ext_info;
981         }
982
983         return NULL;
984 }
985
986 ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
987                                   const char *attr, char *buf)
988 {
989         const struct iio_chan_spec_ext_info *ext_info;
990
991         ext_info = iio_lookup_ext_info(chan, attr);
992         if (!ext_info)
993                 return -EINVAL;
994
995         return ext_info->read(chan->indio_dev, ext_info->private,
996                               chan->channel, buf);
997 }
998 EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);
999
1000 ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
1001                                    const char *buf, size_t len)
1002 {
1003         const struct iio_chan_spec_ext_info *ext_info;
1004
1005         ext_info = iio_lookup_ext_info(chan, attr);
1006         if (!ext_info)
1007                 return -EINVAL;
1008
1009         return ext_info->write(chan->indio_dev, ext_info->private,
1010                                chan->channel, buf, len);
1011 }
1012 EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
1013
1014 ssize_t iio_read_channel_label(struct iio_channel *chan, char *buf)
1015 {
1016         return do_iio_read_channel_label(chan->indio_dev, chan->channel, buf);
1017 }
1018 EXPORT_SYMBOL_GPL(iio_read_channel_label);