1 // SPDX-License-Identifier: GPL-2.0-only
2 /* The industrial I/O core
4 * Copyright (c) 2008 Jonathan Cameron
6 * Based on elements of hwmon and input subsystems.
9 #define pr_fmt(fmt) "iio-core: " fmt
11 #include <linux/anon_inodes.h>
12 #include <linux/cdev.h>
13 #include <linux/debugfs.h>
14 #include <linux/device.h>
15 #include <linux/err.h>
17 #include <linux/idr.h>
18 #include <linux/kdev_t.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/poll.h>
23 #include <linux/property.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
26 #include <linux/wait.h>
28 #include <linux/iio/buffer.h>
29 #include <linux/iio/buffer_impl.h>
30 #include <linux/iio/events.h>
31 #include <linux/iio/iio-opaque.h>
32 #include <linux/iio/iio.h>
33 #include <linux/iio/sysfs.h>
36 #include "iio_core_trigger.h"
38 /* IDA to assign each registered device a unique id */
39 static DEFINE_IDA(iio_ida);
41 static dev_t iio_devt;
43 #define IIO_DEV_MAX 256
44 struct bus_type iio_bus_type = {
47 EXPORT_SYMBOL(iio_bus_type);
49 static struct dentry *iio_debugfs_dentry;
51 static const char * const iio_direction[] = {
56 static const char * const iio_chan_type_name_spec[] = {
57 [IIO_VOLTAGE] = "voltage",
58 [IIO_CURRENT] = "current",
59 [IIO_POWER] = "power",
60 [IIO_ACCEL] = "accel",
61 [IIO_ANGL_VEL] = "anglvel",
63 [IIO_LIGHT] = "illuminance",
64 [IIO_INTENSITY] = "intensity",
65 [IIO_PROXIMITY] = "proximity",
67 [IIO_INCLI] = "incli",
70 [IIO_TIMESTAMP] = "timestamp",
71 [IIO_CAPACITANCE] = "capacitance",
72 [IIO_ALTVOLTAGE] = "altvoltage",
74 [IIO_PRESSURE] = "pressure",
75 [IIO_HUMIDITYRELATIVE] = "humidityrelative",
76 [IIO_ACTIVITY] = "activity",
77 [IIO_STEPS] = "steps",
78 [IIO_ENERGY] = "energy",
79 [IIO_DISTANCE] = "distance",
80 [IIO_VELOCITY] = "velocity",
81 [IIO_CONCENTRATION] = "concentration",
82 [IIO_RESISTANCE] = "resistance",
84 [IIO_UVINDEX] = "uvindex",
85 [IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
86 [IIO_COUNT] = "count",
87 [IIO_INDEX] = "index",
88 [IIO_GRAVITY] = "gravity",
89 [IIO_POSITIONRELATIVE] = "positionrelative",
90 [IIO_PHASE] = "phase",
91 [IIO_MASSCONCENTRATION] = "massconcentration",
94 static const char * const iio_modifier_names[] = {
98 [IIO_MOD_X_AND_Y] = "x&y",
99 [IIO_MOD_X_AND_Z] = "x&z",
100 [IIO_MOD_Y_AND_Z] = "y&z",
101 [IIO_MOD_X_AND_Y_AND_Z] = "x&y&z",
102 [IIO_MOD_X_OR_Y] = "x|y",
103 [IIO_MOD_X_OR_Z] = "x|z",
104 [IIO_MOD_Y_OR_Z] = "y|z",
105 [IIO_MOD_X_OR_Y_OR_Z] = "x|y|z",
106 [IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)",
107 [IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2",
108 [IIO_MOD_LIGHT_BOTH] = "both",
109 [IIO_MOD_LIGHT_IR] = "ir",
110 [IIO_MOD_LIGHT_CLEAR] = "clear",
111 [IIO_MOD_LIGHT_RED] = "red",
112 [IIO_MOD_LIGHT_GREEN] = "green",
113 [IIO_MOD_LIGHT_BLUE] = "blue",
114 [IIO_MOD_LIGHT_UV] = "uv",
115 [IIO_MOD_LIGHT_DUV] = "duv",
116 [IIO_MOD_QUATERNION] = "quaternion",
117 [IIO_MOD_TEMP_AMBIENT] = "ambient",
118 [IIO_MOD_TEMP_OBJECT] = "object",
119 [IIO_MOD_NORTH_MAGN] = "from_north_magnetic",
120 [IIO_MOD_NORTH_TRUE] = "from_north_true",
121 [IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp",
122 [IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp",
123 [IIO_MOD_RUNNING] = "running",
124 [IIO_MOD_JOGGING] = "jogging",
125 [IIO_MOD_WALKING] = "walking",
126 [IIO_MOD_STILL] = "still",
127 [IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)",
130 [IIO_MOD_CO2] = "co2",
131 [IIO_MOD_VOC] = "voc",
132 [IIO_MOD_PM1] = "pm1",
133 [IIO_MOD_PM2P5] = "pm2p5",
134 [IIO_MOD_PM4] = "pm4",
135 [IIO_MOD_PM10] = "pm10",
136 [IIO_MOD_ETHANOL] = "ethanol",
139 [IIO_MOD_LINEAR_X] = "linear_x",
140 [IIO_MOD_LINEAR_Y] = "linear_y",
141 [IIO_MOD_LINEAR_Z] = "linear_z",
142 [IIO_MOD_PITCH] = "pitch",
143 [IIO_MOD_YAW] = "yaw",
144 [IIO_MOD_ROLL] = "roll",
147 /* relies on pairs of these shared then separate */
148 static const char * const iio_chan_info_postfix[] = {
149 [IIO_CHAN_INFO_RAW] = "raw",
150 [IIO_CHAN_INFO_PROCESSED] = "input",
151 [IIO_CHAN_INFO_SCALE] = "scale",
152 [IIO_CHAN_INFO_OFFSET] = "offset",
153 [IIO_CHAN_INFO_CALIBSCALE] = "calibscale",
154 [IIO_CHAN_INFO_CALIBBIAS] = "calibbias",
155 [IIO_CHAN_INFO_PEAK] = "peak_raw",
156 [IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale",
157 [IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw",
158 [IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw",
159 [IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY]
160 = "filter_low_pass_3db_frequency",
161 [IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY]
162 = "filter_high_pass_3db_frequency",
163 [IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency",
164 [IIO_CHAN_INFO_FREQUENCY] = "frequency",
165 [IIO_CHAN_INFO_PHASE] = "phase",
166 [IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain",
167 [IIO_CHAN_INFO_HYSTERESIS] = "hysteresis",
168 [IIO_CHAN_INFO_HYSTERESIS_RELATIVE] = "hysteresis_relative",
169 [IIO_CHAN_INFO_INT_TIME] = "integration_time",
170 [IIO_CHAN_INFO_ENABLE] = "en",
171 [IIO_CHAN_INFO_CALIBHEIGHT] = "calibheight",
172 [IIO_CHAN_INFO_CALIBWEIGHT] = "calibweight",
173 [IIO_CHAN_INFO_DEBOUNCE_COUNT] = "debounce_count",
174 [IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time",
175 [IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity",
176 [IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio",
177 [IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type",
178 [IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient",
179 [IIO_CHAN_INFO_ZEROPOINT] = "zeropoint",
182  * iio_device_id() - query the unique ID for the device
183  * @indio_dev: Device structure whose ID is being queried
185  * The IIO device ID is a unique index used for example for the naming
186  * of the character device /dev/iio\:device[ID]
188 int iio_device_id(struct iio_dev *indio_dev)
190 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
/* The id is private to the core: it lives in the opaque wrapper, not in
 * struct iio_dev itself (presumably allocated from iio_ida — see the IDA
 * declared at the top of this file; TODO confirm at allocation site). */
192 return iio_dev_opaque->id;
194 EXPORT_SYMBOL_GPL(iio_device_id);
197  * iio_buffer_enabled() - helper function to test if the buffer is enabled
198  * @indio_dev: IIO device structure for device
200 bool iio_buffer_enabled(struct iio_dev *indio_dev)
202 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
/* True when the device is currently in any of the three buffered capture
 * modes (triggered, hardware or software buffer). */
204 return iio_dev_opaque->currentmode
205 & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE |
206 INDIO_BUFFER_SOFTWARE);
208 EXPORT_SYMBOL_GPL(iio_buffer_enabled);
210 #if defined(CONFIG_DEBUG_FS)
212  * There's also a CONFIG_DEBUG_FS guard in include/linux/iio/iio.h for
213  * iio_get_debugfs_dentry() to make it inline if CONFIG_DEBUG_FS is undefined
215 struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
217 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
/* Per-device debugfs directory, created in iio_device_register_debugfs(). */
219 return iio_dev_opaque->debugfs_dentry;
221 EXPORT_SYMBOL_GPL(iio_get_debugfs_dentry);
225  * iio_find_channel_from_si() - get channel from its scan index
227  * @si: scan index to match
229 const struct iio_chan_spec
230 *iio_find_channel_from_si(struct iio_dev *indio_dev, int si)
/* Linear search over the channel spec array; first match wins.
 * NOTE(review): the no-match return path is not visible in this view —
 * presumably NULL; confirm against the full function body. */
234 for (i = 0; i < indio_dev->num_channels; i++)
235 if (indio_dev->channels[i].scan_index == si)
236 return &indio_dev->channels[i];
240 /* This turns up an awful lot */
/* sysfs show helper for constant string attributes: emits the stored
 * string followed by a newline. */
241 ssize_t iio_read_const_attr(struct device *dev,
242 struct device_attribute *attr,
245 return sysfs_emit(buf, "%s\n", to_iio_const_attr(attr)->string);
247 EXPORT_SYMBOL(iio_read_const_attr);
250  * iio_device_set_clock() - Set current timestamping clock for the device
251  * @indio_dev: IIO device structure containing the device
252  * @clock_id: timestamping clock posix identifier to set.
254 int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
257 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
258 const struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
/* Interruptible lock: a userspace writer blocked here can be signalled. */
260 ret = mutex_lock_interruptible(&iio_dev_opaque->mlock);
/* Refuse to change the clock while events are enabled or buffered capture
 * is running, since in-flight timestamps would silently change base. */
263 if ((ev_int && iio_event_enabled(ev_int)) ||
264 iio_buffer_enabled(indio_dev)) {
265 mutex_unlock(&iio_dev_opaque->mlock);
268 iio_dev_opaque->clock_id = clock_id;
269 mutex_unlock(&iio_dev_opaque->mlock);
273 EXPORT_SYMBOL(iio_device_set_clock);
276  * iio_device_get_clock() - Retrieve current timestamping clock for the device
277  * @indio_dev: IIO device structure containing the device
279 clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
281 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
/* Lockless read of the posix clock id set by iio_device_set_clock(). */
283 return iio_dev_opaque->clock_id;
285 EXPORT_SYMBOL(iio_device_get_clock);
288  * iio_get_time_ns() - utility function to get a time stamp for events etc
/* Dispatches on the device's configured timestamping clock and returns the
 * current time of that clock in nanoseconds. */
291 s64 iio_get_time_ns(const struct iio_dev *indio_dev)
293 struct timespec64 tp;
295 switch (iio_device_get_clock(indio_dev)) {
297 return ktime_get_real_ns();
298 case CLOCK_MONOTONIC:
299 return ktime_get_ns();
300 case CLOCK_MONOTONIC_RAW:
301 return ktime_get_raw_ns();
302 case CLOCK_REALTIME_COARSE:
303 return ktime_to_ns(ktime_get_coarse_real());
/* No single-call coarse-monotonic ns helper, so go via timespec64. */
304 case CLOCK_MONOTONIC_COARSE:
305 ktime_get_coarse_ts64(&tp);
306 return timespec64_to_ns(&tp);
308 return ktime_get_boottime_ns();
310 return ktime_get_clocktai_ns();
315 EXPORT_SYMBOL(iio_get_time_ns);
/* Module init: register the iio bus, reserve the char-dev region for up to
 * IIO_DEV_MAX devices, then create the top-level "iio" debugfs directory.
 * Uses goto-based unwind on the chrdev failure path. */
317 static int __init iio_init(void)
321 /* Register sysfs bus */
322 ret = bus_register(&iio_bus_type);
324 pr_err("could not register bus type\n");
328 ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
330 pr_err("failed to allocate char dev region\n");
331 goto error_unregister_bus_type;
/* debugfs_create_dir() failure is intentionally ignored: debugfs is
 * best-effort and its API is designed to be safe with error pointers. */
334 iio_debugfs_dentry = debugfs_create_dir("iio", NULL);
338 error_unregister_bus_type:
339 bus_unregister(&iio_bus_type);
/* Module exit: tear down in reverse order of iio_init(). */
344 static void __exit iio_exit(void)
347 unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
348 bus_unregister(&iio_bus_type);
349 debugfs_remove(iio_debugfs_dentry);
352 #if defined(CONFIG_DEBUG_FS)
353 static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
354 size_t count, loff_t *ppos)
356 struct iio_dev *indio_dev = file->private_data;
357 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
358 unsigned int val = 0;
362 return simple_read_from_buffer(userbuf, count, ppos,
363 iio_dev_opaque->read_buf,
364 iio_dev_opaque->read_buf_len);
366 ret = indio_dev->info->debugfs_reg_access(indio_dev,
367 iio_dev_opaque->cached_reg_addr,
370 dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
374 iio_dev_opaque->read_buf_len = snprintf(iio_dev_opaque->read_buf,
375 sizeof(iio_dev_opaque->read_buf),
378 return simple_read_from_buffer(userbuf, count, ppos,
379 iio_dev_opaque->read_buf,
380 iio_dev_opaque->read_buf_len);
/* debugfs "direct_reg_access" write handler.
 * Accepts either "<reg>" (caches the address for a subsequent read) or
 * "<reg> <val>" (performs a register write via debugfs_reg_access).
 * FIX: line 398 contained the mojibake "®" — an HTML-entity corruption of
 * "&reg," — restored to the correct sscanf argument list. */
383 static ssize_t iio_debugfs_write_reg(struct file *file,
384 const char __user *userbuf, size_t count, loff_t *ppos)
386 struct iio_dev *indio_dev = file->private_data;
387 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
388 unsigned int reg, val;
/* Clamp to the local buffer, leaving room for the NUL terminator. */
392 count = min_t(size_t, count, (sizeof(buf)-1));
393 if (copy_from_user(buf, userbuf, count))
/* %i accepts decimal, 0x-hex and octal register/value notation. */
398 ret = sscanf(buf, "%i %i", &reg, &val);
/* One token parsed: only cache the address for the read path. */
402 iio_dev_opaque->cached_reg_addr = reg;
/* Two tokens parsed: cache the address and issue the write. */
405 iio_dev_opaque->cached_reg_addr = reg;
406 ret = indio_dev->info->debugfs_reg_access(indio_dev, reg,
409 dev_err(indio_dev->dev.parent, "%s: write failed\n",
421 static const struct file_operations iio_debugfs_reg_fops = {
423 .read = iio_debugfs_read_reg,
424 .write = iio_debugfs_write_reg,
427 static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
429 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
431 debugfs_remove_recursive(iio_dev_opaque->debugfs_dentry);
434 static void iio_device_register_debugfs(struct iio_dev *indio_dev)
436 struct iio_dev_opaque *iio_dev_opaque;
438 if (indio_dev->info->debugfs_reg_access == NULL)
441 if (!iio_debugfs_dentry)
444 iio_dev_opaque = to_iio_dev_opaque(indio_dev);
446 iio_dev_opaque->debugfs_dentry =
447 debugfs_create_dir(dev_name(&indio_dev->dev),
450 debugfs_create_file("direct_reg_access", 0644,
451 iio_dev_opaque->debugfs_dentry, indio_dev,
452 &iio_debugfs_reg_fops);
455 static void iio_device_register_debugfs(struct iio_dev *indio_dev)
459 static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
462 #endif /* CONFIG_DEBUG_FS */
464 static ssize_t iio_read_channel_ext_info(struct device *dev,
465 struct device_attribute *attr,
468 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
469 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
470 const struct iio_chan_spec_ext_info *ext_info;
472 ext_info = &this_attr->c->ext_info[this_attr->address];
474 return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf);
477 static ssize_t iio_write_channel_ext_info(struct device *dev,
478 struct device_attribute *attr,
482 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
483 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
484 const struct iio_chan_spec_ext_info *ext_info;
486 ext_info = &this_attr->c->ext_info[this_attr->address];
488 return ext_info->write(indio_dev, ext_info->private,
489 this_attr->c, buf, len);
/* sysfs show helper for the "_available" attribute of an iio_enum:
 * emits all item strings space-separated, newline-terminated. */
492 ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
493 uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
495 const struct iio_enum *e = (const struct iio_enum *)priv;
502 for (i = 0; i < e->num_items; ++i) {
505 len += sysfs_emit_at(buf, len, "%s ", e->items[i]);
508 /* replace last space with a newline */
513 EXPORT_SYMBOL_GPL(iio_enum_available_read);
/* sysfs show helper for an iio_enum attribute: asks the driver's get()
 * callback for the current index and emits the matching item string. */
515 ssize_t iio_enum_read(struct iio_dev *indio_dev,
516 uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
518 const struct iio_enum *e = (const struct iio_enum *)priv;
524 i = e->get(indio_dev, chan);
/* Guard against drivers returning an out-of-range index or a hole in a
 * sparse items[] table. */
527 else if (i >= e->num_items || !e->items[i])
530 return sysfs_emit(buf, "%s\n", e->items[i]);
532 EXPORT_SYMBOL_GPL(iio_enum_read);
/* sysfs store helper for an iio_enum attribute: matches the written string
 * against items[] and passes the resulting index to the driver's set(). */
534 ssize_t iio_enum_write(struct iio_dev *indio_dev,
535 uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
538 const struct iio_enum *e = (const struct iio_enum *)priv;
/* __sysfs_match_string() handles the trailing newline and returns the
 * matched index or a negative errno. */
544 ret = __sysfs_match_string(e->items, e->num_items, buf);
548 ret = e->set(indio_dev, chan, ret);
/* On success report the full input length consumed, per sysfs convention. */
549 return ret ? ret : len;
551 EXPORT_SYMBOL_GPL(iio_enum_write);
553 static const struct iio_mount_matrix iio_mount_idmatrix = {
561 static int iio_setup_mount_idmatrix(const struct device *dev,
562 struct iio_mount_matrix *matrix)
564 *matrix = iio_mount_idmatrix;
565 dev_info(dev, "mounting matrix not found: using identity...\n");
569 ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
570 const struct iio_chan_spec *chan, char *buf)
572 const struct iio_mount_matrix *mtx = ((iio_get_mount_matrix_t *)
573 priv)(indio_dev, chan);
579 mtx = &iio_mount_idmatrix;
581 return sysfs_emit(buf, "%s, %s, %s; %s, %s, %s; %s, %s, %s\n",
582 mtx->rotation[0], mtx->rotation[1], mtx->rotation[2],
583 mtx->rotation[3], mtx->rotation[4], mtx->rotation[5],
584 mtx->rotation[6], mtx->rotation[7], mtx->rotation[8]);
586 EXPORT_SYMBOL_GPL(iio_show_mount_matrix);
589 * iio_read_mount_matrix() - retrieve iio device mounting matrix from
590 * device "mount-matrix" property
591 * @dev: device the mounting matrix property is assigned to
592 * @matrix: where to store retrieved matrix
594 * If device is assigned no mounting matrix property, a default 3x3 identity
595 * matrix will be filled in.
597 * Return: 0 if success, or a negative error code on failure.
599 int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix)
601 size_t len = ARRAY_SIZE(iio_mount_idmatrix.rotation);
604 err = device_property_read_string_array(dev, "mount-matrix", matrix->rotation, len);
609 /* Invalid number of matrix entries. */
613 /* Invalid matrix declaration format. */
616 /* Matrix was not declared at all: fallback to identity. */
617 return iio_setup_mount_idmatrix(dev, matrix);
619 EXPORT_SYMBOL(iio_read_mount_matrix);
621 static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type,
622 int size, const int *vals)
626 bool scale_db = false;
630 return sysfs_emit_at(buf, offset, "%d", vals[0]);
631 case IIO_VAL_INT_PLUS_MICRO_DB:
634 case IIO_VAL_INT_PLUS_MICRO:
636 return sysfs_emit_at(buf, offset, "-%d.%06u%s",
637 abs(vals[0]), -vals[1],
638 scale_db ? " dB" : "");
640 return sysfs_emit_at(buf, offset, "%d.%06u%s", vals[0],
641 vals[1], scale_db ? " dB" : "");
642 case IIO_VAL_INT_PLUS_NANO:
644 return sysfs_emit_at(buf, offset, "-%d.%09u",
645 abs(vals[0]), -vals[1]);
647 return sysfs_emit_at(buf, offset, "%d.%09u", vals[0],
649 case IIO_VAL_FRACTIONAL:
650 tmp2 = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
652 tmp0 = (int)div_s64_rem(tmp2, 1000000000, &tmp1);
653 if ((tmp2 < 0) && (tmp0 == 0))
654 return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
656 return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
658 case IIO_VAL_FRACTIONAL_LOG2:
659 tmp2 = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
660 tmp0 = (int)div_s64_rem(tmp2, 1000000000LL, &tmp1);
661 if (tmp0 == 0 && tmp2 < 0)
662 return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
664 return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
666 case IIO_VAL_INT_MULTIPLE:
671 for (i = 0; i < size; ++i)
672 l += sysfs_emit_at(buf, offset + l, "%d ", vals[i]);
676 return sysfs_emit_at(buf, offset, "%c", (char)vals[0]);
678 tmp2 = (s64)((((u64)vals[1]) << 32) | (u32)vals[0]);
679 return sysfs_emit_at(buf, offset, "%lld", tmp2);
686 * iio_format_value() - Formats a IIO value into its string representation
687 * @buf: The buffer to which the formatted value gets written
688 * which is assumed to be big enough (i.e. PAGE_SIZE).
689 * @type: One of the IIO_VAL_* constants. This decides how the val
690 * and val2 parameters are formatted.
691 * @size: Number of IIO value entries contained in vals
692 * @vals: Pointer to the values, exact meaning depends on the
695 * Return: 0 by default, a negative number on failure or the
696 * total number of characters written for a type that belongs
697 * to the IIO_VAL_* constant.
/* Public wrapper around __iio_format_value(): formats the value(s) at the
 * start of buf and appends a trailing newline.  Callers are documented
 * above to pass a PAGE_SIZE buffer; the length check guards the append. */
699 ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
703 len = __iio_format_value(buf, 0, type, size, vals);
704 if (len >= PAGE_SIZE - 1)
707 return len + sysfs_emit_at(buf, len, "\n");
709 EXPORT_SYMBOL_GPL(iio_format_value);
711 static ssize_t iio_read_channel_label(struct device *dev,
712 struct device_attribute *attr,
715 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
716 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
718 if (indio_dev->info->read_label)
719 return indio_dev->info->read_label(indio_dev, this_attr->c, buf);
721 if (this_attr->c->extend_name)
722 return sysfs_emit(buf, "%s\n", this_attr->c->extend_name);
727 static ssize_t iio_read_channel_info(struct device *dev,
728 struct device_attribute *attr,
731 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
732 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
733 int vals[INDIO_MAX_RAW_ELEMENTS];
737 if (indio_dev->info->read_raw_multi)
738 ret = indio_dev->info->read_raw_multi(indio_dev, this_attr->c,
739 INDIO_MAX_RAW_ELEMENTS,
743 ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
744 &vals[0], &vals[1], this_attr->address);
749 return iio_format_value(buf, ret, val_len, vals);
752 static ssize_t iio_format_list(char *buf, const int *vals, int type, int length,
753 const char *prefix, const char *suffix)
768 len = sysfs_emit(buf, prefix);
770 for (i = 0; i <= length - stride; i += stride) {
772 len += sysfs_emit_at(buf, len, " ");
773 if (len >= PAGE_SIZE)
777 len += __iio_format_value(buf, len, type, stride, &vals[i]);
778 if (len >= PAGE_SIZE)
782 len += sysfs_emit_at(buf, len, "%s\n", suffix);
787 static ssize_t iio_format_avail_list(char *buf, const int *vals,
788 int type, int length)
791 return iio_format_list(buf, vals, type, length, "", "");
794 static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
799 * length refers to the array size , not the number of elements.
800 * The purpose is to print the range [min , step ,max] so length should
801 * be 3 in case of int, and 6 for other types.
812 return iio_format_list(buf, vals, type, length, "[", "]");
815 static ssize_t iio_read_channel_info_avail(struct device *dev,
816 struct device_attribute *attr,
819 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
820 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
826 ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
827 &vals, &type, &length,
834 return iio_format_avail_list(buf, vals, type, length);
835 case IIO_AVAIL_RANGE:
836 return iio_format_avail_range(buf, vals, type);
843 * __iio_str_to_fixpoint() - Parse a fixed-point number from a string
844 * @str: The string to parse
845 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
846 * @integer: The integer part of the number
847 * @fract: The fractional part of the number
848 * @scale_db: True if this should parse as dB
850 * Returns 0 on success, or a negative error code if the string could not be
853 static int __iio_str_to_fixpoint(const char *str, int fract_mult,
854 int *integer, int *fract, bool scale_db)
857 bool integer_part = true, negative = false;
859 if (fract_mult == 0) {
862 return kstrtoint(str, 0, integer);
868 } else if (str[0] == '+') {
873 if ('0' <= *str && *str <= '9') {
875 i = i * 10 + *str - '0';
877 f += fract_mult * (*str - '0');
880 } else if (*str == '\n') {
881 if (*(str + 1) == '\0')
884 } else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) {
885 /* Ignore the dB suffix */
886 str += sizeof(" dB") - 1;
888 } else if (!strncmp(str, "dB", sizeof("dB") - 1) && scale_db) {
889 /* Ignore the dB suffix */
890 str += sizeof("dB") - 1;
892 } else if (*str == '.' && integer_part) {
893 integer_part = false;
914 * iio_str_to_fixpoint() - Parse a fixed-point number from a string
915 * @str: The string to parse
916 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
917 * @integer: The integer part of the number
918 * @fract: The fractional part of the number
920 * Returns 0 on success, or a negative error code if the string could not be
923 int iio_str_to_fixpoint(const char *str, int fract_mult,
924 int *integer, int *fract)
926 return __iio_str_to_fixpoint(str, fract_mult, integer, fract, false);
928 EXPORT_SYMBOL_GPL(iio_str_to_fixpoint);
930 static ssize_t iio_write_channel_info(struct device *dev,
931 struct device_attribute *attr,
935 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
936 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
937 int ret, fract_mult = 100000;
938 int integer, fract = 0;
939 bool is_char = false;
940 bool scale_db = false;
942 /* Assumes decimal - precision based on number of digits */
943 if (!indio_dev->info->write_raw)
946 if (indio_dev->info->write_raw_get_fmt)
947 switch (indio_dev->info->write_raw_get_fmt(indio_dev,
948 this_attr->c, this_attr->address)) {
952 case IIO_VAL_INT_PLUS_MICRO_DB:
955 case IIO_VAL_INT_PLUS_MICRO:
958 case IIO_VAL_INT_PLUS_NANO:
959 fract_mult = 100000000;
971 if (sscanf(buf, "%c", &ch) != 1)
975 ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract,
981 ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
982 integer, fract, this_attr->address);
990 int __iio_device_attr_init(struct device_attribute *dev_attr,
992 struct iio_chan_spec const *chan,
993 ssize_t (*readfunc)(struct device *dev,
994 struct device_attribute *attr,
996 ssize_t (*writefunc)(struct device *dev,
997 struct device_attribute *attr,
1000 enum iio_shared_by shared_by)
1006 sysfs_attr_init(&dev_attr->attr);
1008 /* Build up postfix of <extend_name>_<modifier>_postfix */
1009 if (chan->modified && (shared_by == IIO_SEPARATE)) {
1010 if (chan->extend_name)
1011 full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
1012 iio_modifier_names[chan
1017 full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
1018 iio_modifier_names[chan
1022 if (chan->extend_name == NULL || shared_by != IIO_SEPARATE)
1023 full_postfix = kstrdup(postfix, GFP_KERNEL);
1025 full_postfix = kasprintf(GFP_KERNEL,
1030 if (full_postfix == NULL)
1033 if (chan->differential) { /* Differential can not have modifier */
1034 switch (shared_by) {
1035 case IIO_SHARED_BY_ALL:
1036 name = kasprintf(GFP_KERNEL, "%s", full_postfix);
1038 case IIO_SHARED_BY_DIR:
1039 name = kasprintf(GFP_KERNEL, "%s_%s",
1040 iio_direction[chan->output],
1043 case IIO_SHARED_BY_TYPE:
1044 name = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
1045 iio_direction[chan->output],
1046 iio_chan_type_name_spec[chan->type],
1047 iio_chan_type_name_spec[chan->type],
1051 if (!chan->indexed) {
1052 WARN(1, "Differential channels must be indexed\n");
1054 goto error_free_full_postfix;
1056 name = kasprintf(GFP_KERNEL,
1058 iio_direction[chan->output],
1059 iio_chan_type_name_spec[chan->type],
1061 iio_chan_type_name_spec[chan->type],
1066 } else { /* Single ended */
1067 switch (shared_by) {
1068 case IIO_SHARED_BY_ALL:
1069 name = kasprintf(GFP_KERNEL, "%s", full_postfix);
1071 case IIO_SHARED_BY_DIR:
1072 name = kasprintf(GFP_KERNEL, "%s_%s",
1073 iio_direction[chan->output],
1076 case IIO_SHARED_BY_TYPE:
1077 name = kasprintf(GFP_KERNEL, "%s_%s_%s",
1078 iio_direction[chan->output],
1079 iio_chan_type_name_spec[chan->type],
1085 name = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
1086 iio_direction[chan->output],
1087 iio_chan_type_name_spec[chan->type],
1091 name = kasprintf(GFP_KERNEL, "%s_%s_%s",
1092 iio_direction[chan->output],
1093 iio_chan_type_name_spec[chan->type],
1100 goto error_free_full_postfix;
1102 dev_attr->attr.name = name;
1105 dev_attr->attr.mode |= 0444;
1106 dev_attr->show = readfunc;
1110 dev_attr->attr.mode |= 0200;
1111 dev_attr->store = writefunc;
1114 error_free_full_postfix:
1115 kfree(full_postfix);
1120 static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
1122 kfree(dev_attr->attr.name);
1125 int __iio_add_chan_devattr(const char *postfix,
1126 struct iio_chan_spec const *chan,
1127 ssize_t (*readfunc)(struct device *dev,
1128 struct device_attribute *attr,
1130 ssize_t (*writefunc)(struct device *dev,
1131 struct device_attribute *attr,
1135 enum iio_shared_by shared_by,
1137 struct iio_buffer *buffer,
1138 struct list_head *attr_list)
1141 struct iio_dev_attr *iio_attr, *t;
1143 iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
1144 if (iio_attr == NULL)
1146 ret = __iio_device_attr_init(&iio_attr->dev_attr,
1148 readfunc, writefunc, shared_by);
1150 goto error_iio_dev_attr_free;
1152 iio_attr->address = mask;
1153 iio_attr->buffer = buffer;
1154 list_for_each_entry(t, attr_list, l)
1155 if (strcmp(t->dev_attr.attr.name,
1156 iio_attr->dev_attr.attr.name) == 0) {
1157 if (shared_by == IIO_SEPARATE)
1158 dev_err(dev, "tried to double register : %s\n",
1159 t->dev_attr.attr.name);
1161 goto error_device_attr_deinit;
1163 list_add(&iio_attr->l, attr_list);
1167 error_device_attr_deinit:
1168 __iio_device_attr_deinit(&iio_attr->dev_attr);
1169 error_iio_dev_attr_free:
1174 static int iio_device_add_channel_label(struct iio_dev *indio_dev,
1175 struct iio_chan_spec const *chan)
1177 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1180 if (!indio_dev->info->read_label && !chan->extend_name)
1183 ret = __iio_add_chan_devattr("label",
1185 &iio_read_channel_label,
1191 &iio_dev_opaque->channel_attr_list);
1198 static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
1199 struct iio_chan_spec const *chan,
1200 enum iio_shared_by shared_by,
1201 const long *infomask)
1203 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1204 int i, ret, attrcount = 0;
1206 for_each_set_bit(i, infomask, sizeof(*infomask)*8) {
1207 if (i >= ARRAY_SIZE(iio_chan_info_postfix))
1209 ret = __iio_add_chan_devattr(iio_chan_info_postfix[i],
1211 &iio_read_channel_info,
1212 &iio_write_channel_info,
1217 &iio_dev_opaque->channel_attr_list);
1218 if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
1228 static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
1229 struct iio_chan_spec const *chan,
1230 enum iio_shared_by shared_by,
1231 const long *infomask)
1233 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1234 int i, ret, attrcount = 0;
1235 char *avail_postfix;
1237 for_each_set_bit(i, infomask, sizeof(*infomask) * 8) {
1238 if (i >= ARRAY_SIZE(iio_chan_info_postfix))
1240 avail_postfix = kasprintf(GFP_KERNEL,
1242 iio_chan_info_postfix[i]);
1246 ret = __iio_add_chan_devattr(avail_postfix,
1248 &iio_read_channel_info_avail,
1254 &iio_dev_opaque->channel_attr_list);
1255 kfree(avail_postfix);
1256 if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
1266 static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
1267 struct iio_chan_spec const *chan)
1269 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1270 int ret, attrcount = 0;
1271 const struct iio_chan_spec_ext_info *ext_info;
1273 if (chan->channel < 0)
1275 ret = iio_device_add_info_mask_type(indio_dev, chan,
1277 &chan->info_mask_separate);
1282 ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
1284 &chan->info_mask_separate_available);
1289 ret = iio_device_add_info_mask_type(indio_dev, chan,
1291 &chan->info_mask_shared_by_type);
1296 ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
1298 &chan->info_mask_shared_by_type_available);
1303 ret = iio_device_add_info_mask_type(indio_dev, chan,
1305 &chan->info_mask_shared_by_dir);
1310 ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
1312 &chan->info_mask_shared_by_dir_available);
1317 ret = iio_device_add_info_mask_type(indio_dev, chan,
1319 &chan->info_mask_shared_by_all);
1324 ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
1326 &chan->info_mask_shared_by_all_available);
1331 ret = iio_device_add_channel_label(indio_dev, chan);
1336 if (chan->ext_info) {
1339 for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
1340 ret = __iio_add_chan_devattr(ext_info->name,
1343 &iio_read_channel_ext_info : NULL,
1345 &iio_write_channel_ext_info : NULL,
1350 &iio_dev_opaque->channel_attr_list);
1352 if (ret == -EBUSY && ext_info->shared)
1366 * iio_free_chan_devattr_list() - Free a list of IIO device attributes
1367 * @attr_list: List of IIO device attributes
1369 * This function frees the memory allocated for each of the IIO device
1370 * attributes in the list.
1372 void iio_free_chan_devattr_list(struct list_head *attr_list)
1374 struct iio_dev_attr *p, *n;
1376 list_for_each_entry_safe(p, n, attr_list, l) {
1377 kfree_const(p->dev_attr.attr.name);
/* sysfs "name" attribute: the device's registered name string. */
1383 static ssize_t name_show(struct device *dev, struct device_attribute *attr,
1386 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1388 return sysfs_emit(buf, "%s\n", indio_dev->name);
1391 static DEVICE_ATTR_RO(name);
/* sysfs "label" attribute: the optional device label string. */
1393 static ssize_t label_show(struct device *dev, struct device_attribute *attr,
1396 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1398 return sysfs_emit(buf, "%s\n", indio_dev->label);
1401 static DEVICE_ATTR_RO(label);
1403 static ssize_t current_timestamp_clock_show(struct device *dev,
1404 struct device_attribute *attr,
1407 const struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1408 const clockid_t clk = iio_device_get_clock(indio_dev);
1413 case CLOCK_REALTIME:
1414 name = "realtime\n";
1415 sz = sizeof("realtime\n");
1417 case CLOCK_MONOTONIC:
1418 name = "monotonic\n";
1419 sz = sizeof("monotonic\n");
1421 case CLOCK_MONOTONIC_RAW:
1422 name = "monotonic_raw\n";
1423 sz = sizeof("monotonic_raw\n");
1425 case CLOCK_REALTIME_COARSE:
1426 name = "realtime_coarse\n";
1427 sz = sizeof("realtime_coarse\n");
1429 case CLOCK_MONOTONIC_COARSE:
1430 name = "monotonic_coarse\n";
1431 sz = sizeof("monotonic_coarse\n");
1433 case CLOCK_BOOTTIME:
1434 name = "boottime\n";
1435 sz = sizeof("boottime\n");
1439 sz = sizeof("tai\n");
1445 memcpy(buf, name, sz);
/*
 * Store half of "current_timestamp_clock": parse the user string (sysfs_streq
 * tolerates the trailing newline sysfs writes carry) into a clockid_t and
 * apply it via iio_device_set_clock().
 * NOTE(review): the final else (invalid input) and the CLOCK_TAI assignment
 * appear elided in this extract.
 */
1449 static ssize_t current_timestamp_clock_store(struct device *dev,
1450 struct device_attribute *attr,
1451 const char *buf, size_t len)
1456 if (sysfs_streq(buf, "realtime"))
1457 clk = CLOCK_REALTIME;
1458 else if (sysfs_streq(buf, "monotonic"))
1459 clk = CLOCK_MONOTONIC;
1460 else if (sysfs_streq(buf, "monotonic_raw"))
1461 clk = CLOCK_MONOTONIC_RAW;
1462 else if (sysfs_streq(buf, "realtime_coarse"))
1463 clk = CLOCK_REALTIME_COARSE;
1464 else if (sysfs_streq(buf, "monotonic_coarse"))
1465 clk = CLOCK_MONOTONIC_COARSE;
1466 else if (sysfs_streq(buf, "boottime"))
1467 clk = CLOCK_BOOTTIME;
1468 else if (sysfs_streq(buf, "tai"))
1473 ret = iio_device_set_clock(dev_to_iio_dev(dev), clk);
/*
 * Append @group to the device's NULL-terminated array of attribute groups.
 * The array is grown with krealloc to hold one more group pointer plus the
 * NULL sentinel (hence cnt + 2). The groups array is handed to the driver
 * core in __iio_device_register via indio_dev->dev.groups.
 * NOTE(review): the krealloc NULL check appears elided in this extract; on
 * failure @old must remain valid and must not be overwritten.
 */
1480 int iio_device_register_sysfs_group(struct iio_dev *indio_dev,
1481 const struct attribute_group *group)
1483 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1484 const struct attribute_group **new, **old = iio_dev_opaque->groups;
1485 unsigned int cnt = iio_dev_opaque->groupcounter;
1487 new = krealloc(old, sizeof(*new) * (cnt + 2), GFP_KERNEL);
1491 new[iio_dev_opaque->groupcounter++] = group;
/* keep the array NULL-terminated for the driver core */
1492 new[iio_dev_opaque->groupcounter] = NULL;
1494 iio_dev_opaque->groups = new;
1499 static DEVICE_ATTR_RW(current_timestamp_clock);
/*
 * Build the per-device channel attribute group:
 *  1) count pre-existing driver attributes (info->attrs),
 *  2) create per-channel attributes (iio_device_add_channel_sysfs),
 *  3) allocate one flat attrs array, copy the originals in and append the
 *     dynamically created ones plus name/label/current_timestamp_clock,
 *  4) register the group via iio_device_register_sysfs_group.
 * On error, frees the dynamic channel attribute list.
 * NOTE(review): several counting/return/brace lines are elided in this
 * extract (e.g. the attrcount increments guarded by name/label/clk).
 */
1501 static int iio_device_register_sysfs(struct iio_dev *indio_dev)
1503 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1504 int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
1505 struct iio_dev_attr *p;
1506 struct attribute **attr, *clk = NULL;
1508 /* First count elements in any existing group */
1509 if (indio_dev->info->attrs) {
1510 attr = indio_dev->info->attrs->attrs;
1511 while (*attr++ != NULL)
1514 attrcount = attrcount_orig;
1516 * New channel registration method - relies on the fact a group does
1517 * not need to be initialized if its name is NULL.
1519 if (indio_dev->channels)
1520 for (i = 0; i < indio_dev->num_channels; i++) {
1521 const struct iio_chan_spec *chan =
1522 &indio_dev->channels[i];
/* a timestamp channel implies the clock-selection attribute is exposed */
1524 if (chan->type == IIO_TIMESTAMP)
1525 clk = &dev_attr_current_timestamp_clock.attr;
1527 ret = iio_device_add_channel_sysfs(indio_dev, chan);
1529 goto error_clear_attrs;
/* events are timestamped too, so they also need the clock attribute */
1533 if (iio_dev_opaque->event_interface)
1534 clk = &dev_attr_current_timestamp_clock.attr;
1536 if (indio_dev->name)
1538 if (indio_dev->label)
/* +1 for the NULL terminator expected by the attribute-group core */
1543 iio_dev_opaque->chan_attr_group.attrs =
1544 kcalloc(attrcount + 1,
1545 sizeof(iio_dev_opaque->chan_attr_group.attrs[0]),
1547 if (iio_dev_opaque->chan_attr_group.attrs == NULL) {
1549 goto error_clear_attrs;
1551 /* Copy across original attributes, and point to original binary attributes */
1552 if (indio_dev->info->attrs) {
1553 memcpy(iio_dev_opaque->chan_attr_group.attrs,
1554 indio_dev->info->attrs->attrs,
1555 sizeof(iio_dev_opaque->chan_attr_group.attrs[0])
1557 iio_dev_opaque->chan_attr_group.is_visible =
1558 indio_dev->info->attrs->is_visible;
1559 iio_dev_opaque->chan_attr_group.bin_attrs =
1560 indio_dev->info->attrs->bin_attrs;
1562 attrn = attrcount_orig;
1563 /* Add all elements from the list. */
1564 list_for_each_entry(p, &iio_dev_opaque->channel_attr_list, l)
1565 iio_dev_opaque->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
1566 if (indio_dev->name)
1567 iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
1568 if (indio_dev->label)
1569 iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_label.attr;
1571 iio_dev_opaque->chan_attr_group.attrs[attrn++] = clk;
1573 ret = iio_device_register_sysfs_group(indio_dev,
1574 &iio_dev_opaque->chan_attr_group);
1576 goto error_clear_attrs;
/* error path: tear down the dynamically built channel attributes */
1581 iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);
/*
 * Undo iio_device_register_sysfs: free the dynamic channel attribute list,
 * the flat attrs array, and the groups array grown by
 * iio_device_register_sysfs_group. Pointers are NULLed so a second call
 * (e.g. from the release path) is harmless.
 */
1586 static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
1588 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1590 iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);
1591 kfree(iio_dev_opaque->chan_attr_group.attrs);
1592 iio_dev_opaque->chan_attr_group.attrs = NULL;
1593 kfree(iio_dev_opaque->groups);
1594 iio_dev_opaque->groups = NULL;
/*
 * struct device release callback: runs when the last reference to the
 * embedded device is dropped (put_device). Tears down trigger consumer
 * state, eventset, sysfs, attached buffers, the lockdep key, returns the
 * device id to the IDA and finally frees the opaque container allocated
 * in iio_device_alloc.
 */
1597 static void iio_dev_release(struct device *device)
1599 struct iio_dev *indio_dev = dev_to_iio_dev(device);
1600 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1602 if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
1603 iio_device_unregister_trigger_consumer(indio_dev);
1604 iio_device_unregister_eventset(indio_dev);
1605 iio_device_unregister_sysfs(indio_dev);
1607 iio_device_detach_buffers(indio_dev);
1609 lockdep_unregister_key(&iio_dev_opaque->mlock_key);
1611 ida_free(&iio_ida, iio_dev_opaque->id);
1612 kfree(iio_dev_opaque);
/* Device type shared by all IIO devices; release frees the whole object. */
1615 const struct device_type iio_device_type = {
1616 .name = "iio_device",
1617 .release = iio_dev_release,
1621 * iio_device_alloc() - allocate an iio_dev from a driver
1622 * @parent: Parent device.
1623 * @sizeof_priv: Space to allocate for private structure.
 *
 * Allocates the opaque container plus @sizeof_priv bytes of driver-private
 * data in one allocation; the private area is aligned to IIO_DMA_MINALIGN
 * so drivers may DMA into it. Initializes the embedded struct device,
 * locks, lists, assigns an IDA id and sets the "iio:deviceN" name.
 * NOTE(review): return statements (including error returns) are elided in
 * this extract.
1625 struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
1627 struct iio_dev_opaque *iio_dev_opaque;
1628 struct iio_dev *indio_dev;
1631 alloc_size = sizeof(struct iio_dev_opaque);
/* align the start of the private area for DMA-capable drivers */
1633 alloc_size = ALIGN(alloc_size, IIO_DMA_MINALIGN);
1634 alloc_size += sizeof_priv;
1637 iio_dev_opaque = kzalloc(alloc_size, GFP_KERNEL);
1638 if (!iio_dev_opaque)
1641 indio_dev = &iio_dev_opaque->indio_dev;
1642 indio_dev->priv = (char *)iio_dev_opaque +
1643 ALIGN(sizeof(struct iio_dev_opaque), IIO_DMA_MINALIGN);
1645 indio_dev->dev.parent = parent;
1646 indio_dev->dev.type = &iio_device_type;
1647 indio_dev->dev.bus = &iio_bus_type;
1648 device_initialize(&indio_dev->dev);
1649 mutex_init(&iio_dev_opaque->mlock);
1650 mutex_init(&iio_dev_opaque->info_exist_lock);
1651 INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list);
1653 iio_dev_opaque->id = ida_alloc(&iio_ida, GFP_KERNEL);
1654 if (iio_dev_opaque->id < 0) {
1655 /* cannot use a dev_err as the name isn't available */
1656 pr_err("failed to get device id\n");
1657 kfree(iio_dev_opaque);
1661 if (dev_set_name(&indio_dev->dev, "iio:device%d", iio_dev_opaque->id)) {
1662 ida_free(&iio_ida, iio_dev_opaque->id);
1663 kfree(iio_dev_opaque);
1667 INIT_LIST_HEAD(&iio_dev_opaque->buffer_list);
1668 INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers);
/* per-device lock class so lockdep can tell nested IIO devices apart */
1670 lockdep_register_key(&iio_dev_opaque->mlock_key);
1671 lockdep_set_class(&iio_dev_opaque->mlock, &iio_dev_opaque->mlock_key);
1675 EXPORT_SYMBOL(iio_device_alloc);
1678 * iio_device_free() - free an iio_dev from a driver
1679 * @dev: the iio_dev associated with the device
 *
 * Drops the device reference; actual freeing happens in iio_dev_release
 * once the last reference is gone.
1681 void iio_device_free(struct iio_dev *dev)
1684 put_device(&dev->dev);
1686 EXPORT_SYMBOL(iio_device_free);
/* devres action callback: free the managed iio_dev on driver detach. */
1688 static void devm_iio_device_release(void *iio_dev)
1690 iio_device_free(iio_dev);
1694 * devm_iio_device_alloc - Resource-managed iio_device_alloc()
1695 * @parent: Device to allocate iio_dev for, and parent for this IIO device
1696 * @sizeof_priv: Space to allocate for private structure.
1698 * Managed iio_device_alloc. iio_dev allocated with this function is
1699 * automatically freed on driver detach.
1702 * Pointer to allocated iio_dev on success, NULL on failure.
1704 struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv)
1706 struct iio_dev *iio_dev;
1709 iio_dev = iio_device_alloc(parent, sizeof_priv);
/* _or_reset frees iio_dev immediately if the action can't be registered */
1713 ret = devm_add_action_or_reset(parent, devm_iio_device_release,
1720 EXPORT_SYMBOL_GPL(devm_iio_device_alloc);
1723 * iio_chrdev_open() - chrdev file open for buffer access and ioctls
1724 * @inode: Inode structure for identifying the device in the file system
1725 * @filp: File structure for iio device used to keep and later access
1728 * Return: 0 on success or -EBUSY if the device is already opened
1730 static int iio_chrdev_open(struct inode *inode, struct file *filp)
1732 struct iio_dev_opaque *iio_dev_opaque =
1733 container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
1734 struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;
1735 struct iio_dev_buffer_pair *ib;
/* single-open policy: the busy bit rejects concurrent opens */
1737 if (test_and_set_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags))
1740 iio_device_get(indio_dev);
1742 ib = kmalloc(sizeof(*ib), GFP_KERNEL);
/* allocation failure: undo the reference and the busy bit */
1744 iio_device_put(indio_dev);
1745 clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
/* pair the device with its primary buffer for the fops to use */
1749 ib->indio_dev = indio_dev;
1750 ib->buffer = indio_dev->buffer;
1752 filp->private_data = ib;
1758 * iio_chrdev_release() - chrdev file close buffer access and ioctls
1759 * @inode: Inode structure pointer for the char device
1760 * @filp: File structure pointer for the char device
1762 * Return: 0 for successful release
 *
 * Mirrors iio_chrdev_open: clears the single-open busy bit and drops the
 * device reference taken at open time.
 * NOTE(review): the kfree(ib) line appears elided in this extract.
1764 static int iio_chrdev_release(struct inode *inode, struct file *filp)
1766 struct iio_dev_buffer_pair *ib = filp->private_data;
1767 struct iio_dev_opaque *iio_dev_opaque =
1768 container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
1769 struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;
1772 clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
1773 iio_device_put(indio_dev);
/*
 * Append an ioctl handler to the device's handler chain; iio_ioctl walks
 * this list until one handler claims the command.
 */
1778 void iio_device_ioctl_handler_register(struct iio_dev *indio_dev,
1779 struct iio_ioctl_handler *h)
1781 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1783 list_add_tail(&h->entry, &iio_dev_opaque->ioctl_handlers);
/* Remove a handler registered with iio_device_ioctl_handler_register(). */
1786 void iio_device_ioctl_handler_unregister(struct iio_ioctl_handler *h)
1788 list_del(&h->entry);
/*
 * Dispatch an ioctl to the first registered handler that claims it
 * (anything other than IIO_IOCTL_UNHANDLED). info_exist_lock is held so
 * the device cannot disappear mid-call; a NULL info means the device is
 * being torn down while userspace still holds the fd.
 * NOTE(review): the error-return values and unlock label are elided in
 * this extract.
 */
1791 static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1793 struct iio_dev_buffer_pair *ib = filp->private_data;
1794 struct iio_dev *indio_dev = ib->indio_dev;
1795 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1796 struct iio_ioctl_handler *h;
1799 mutex_lock(&iio_dev_opaque->info_exist_lock);
1802 * The NULL check here is required to prevent crashing when a device
1803 * is being removed while userspace would still have open file handles
1804 * to try to access this device.
1806 if (!indio_dev->info)
1809 list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
1810 ret = h->ioctl(indio_dev, filp, cmd, arg);
1811 if (ret != IIO_IOCTL_UNHANDLED)
/* no handler claimed the command */
1815 if (ret == IIO_IOCTL_UNHANDLED)
1819 mutex_unlock(&iio_dev_opaque->info_exist_lock);
/* fops for devices with attached buffers: read/write/poll plus ioctls. */
1824 static const struct file_operations iio_buffer_fileops = {
1825 .owner = THIS_MODULE,
1826 .llseek = noop_llseek,
1827 .read = iio_buffer_read_outer_addr,
1828 .write = iio_buffer_write_outer_addr,
1829 .poll = iio_buffer_poll_addr,
1830 .unlocked_ioctl = iio_ioctl,
1831 .compat_ioctl = compat_ptr_ioctl,
1832 .open = iio_chrdev_open,
1833 .release = iio_chrdev_release,
/* fops for event-only devices: ioctls but no buffer read/write/poll. */
1836 static const struct file_operations iio_event_fileops = {
1837 .owner = THIS_MODULE,
1838 .llseek = noop_llseek,
1839 .unlocked_ioctl = iio_ioctl,
1840 .compat_ioctl = compat_ptr_ioctl,
1841 .open = iio_chrdev_open,
1842 .release = iio_chrdev_release,
/*
 * Verify no two channels share a scan index (buffered devices only;
 * negative scan indices are exempt). O(n^2) pairwise check — fine for the
 * small channel counts IIO devices have.
 * NOTE(review): the early returns and error return value are elided in
 * this extract.
 */
1845 static int iio_check_unique_scan_index(struct iio_dev *indio_dev)
1848 const struct iio_chan_spec *channels = indio_dev->channels;
1850 if (!(indio_dev->modes & INDIO_ALL_BUFFER_MODES))
1853 for (i = 0; i < indio_dev->num_channels - 1; i++) {
1854 if (channels[i].scan_index < 0)
1856 for (j = i + 1; j < indio_dev->num_channels; j++)
1857 if (channels[i].scan_index == channels[j].scan_index) {
1858 dev_err(&indio_dev->dev,
1859 "Duplicate scan index %d\n",
1860 channels[i].scan_index);
/*
 * Labels (info->read_label) and per-channel extend_name are mutually
 * exclusive: reject registration if both are present on any channel.
 */
1868 static int iio_check_extended_name(const struct iio_dev *indio_dev)
1872 if (!indio_dev->info->read_label)
1875 for (i = 0; i < indio_dev->num_channels; i++) {
1876 if (indio_dev->channels[i].extend_name) {
1877 dev_err(&indio_dev->dev,
1878 "Cannot use labels and extend_name at the same time\n");
/* All-NULL setup ops used when a buffered driver supplies none. */
1886 static const struct iio_buffer_setup_ops noop_ring_setup_ops;
/*
 * Register a fully configured iio_dev with the subsystem: resolve the
 * firmware node (falling back to the parent's), read the optional "label"
 * property, validate scan indices and label/extend_name exclusivity, then
 * create debugfs, buffer sysfs, device sysfs and the eventset. A chrdev is
 * added only when there are attached buffers or an event interface.
 * Error paths unwind in reverse order via the goto labels.
 * NOTE(review): several return statements and closing braces are elided
 * in this extract.
 */
1888 int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
1890 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1891 struct fwnode_handle *fwnode;
1894 if (!indio_dev->info)
1897 iio_dev_opaque->driver_module = this_mod;
1899 /* If the calling driver did not initialize firmware node, do it here */
1900 if (dev_fwnode(&indio_dev->dev))
1901 fwnode = dev_fwnode(&indio_dev->dev);
1903 fwnode = dev_fwnode(indio_dev->dev.parent);
1904 device_set_node(&indio_dev->dev, fwnode);
/* optional property; label stays NULL (and unexposed) if absent */
1906 fwnode_property_read_string(fwnode, "label", &indio_dev->label);
1908 ret = iio_check_unique_scan_index(indio_dev);
1912 ret = iio_check_extended_name(indio_dev);
1916 iio_device_register_debugfs(indio_dev);
1918 ret = iio_buffers_alloc_sysfs_and_mask(indio_dev);
1920 dev_err(indio_dev->dev.parent,
1921 "Failed to create buffer sysfs interfaces\n");
1922 goto error_unreg_debugfs;
1925 ret = iio_device_register_sysfs(indio_dev);
1927 dev_err(indio_dev->dev.parent,
1928 "Failed to register sysfs interfaces\n");
1929 goto error_buffer_free_sysfs;
1931 ret = iio_device_register_eventset(indio_dev);
1933 dev_err(indio_dev->dev.parent,
1934 "Failed to register event set\n");
1935 goto error_free_sysfs;
1937 if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
1938 iio_device_register_trigger_consumer(indio_dev);
/* buffered drivers may omit setup_ops; substitute the no-op table */
1940 if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) &&
1941 indio_dev->setup_ops == NULL)
1942 indio_dev->setup_ops = &noop_ring_setup_ops;
1944 if (iio_dev_opaque->attached_buffers_cnt)
1945 cdev_init(&iio_dev_opaque->chrdev, &iio_buffer_fileops);
1946 else if (iio_dev_opaque->event_interface)
1947 cdev_init(&iio_dev_opaque->chrdev, &iio_event_fileops);
1949 if (iio_dev_opaque->attached_buffers_cnt || iio_dev_opaque->event_interface) {
1950 indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), iio_dev_opaque->id);
1951 iio_dev_opaque->chrdev.owner = this_mod;
1954 /* assign device groups now; they should be all registered now */
1955 indio_dev->dev.groups = iio_dev_opaque->groups;
1957 ret = cdev_device_add(&iio_dev_opaque->chrdev, &indio_dev->dev);
1959 goto error_unreg_eventset;
1963 error_unreg_eventset:
1964 iio_device_unregister_eventset(indio_dev);
1966 iio_device_unregister_sysfs(indio_dev);
1967 error_buffer_free_sysfs:
1968 iio_buffers_free_sysfs_and_mask(indio_dev);
1969 error_unreg_debugfs:
1970 iio_device_unregister_debugfs(indio_dev);
1973 EXPORT_SYMBOL(__iio_device_register);
1976 * iio_device_unregister() - unregister a device from the IIO subsystem
1977 * @indio_dev: Device structure representing the device.
 *
 * Removes the chrdev first so no new opens arrive, then under
 * info_exist_lock tears down debugfs, disables buffers and NULLs
 * indio_dev->info — the marker iio_ioctl checks to reject late callers —
 * before waking any blocked event/buffer waiters.
1979 void iio_device_unregister(struct iio_dev *indio_dev)
1981 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1983 cdev_device_del(&iio_dev_opaque->chrdev, &indio_dev->dev);
1985 mutex_lock(&iio_dev_opaque->info_exist_lock);
1987 iio_device_unregister_debugfs(indio_dev);
1989 iio_disable_all_buffers(indio_dev);
1991 indio_dev->info = NULL;
1993 iio_device_wakeup_eventset(indio_dev);
1994 iio_buffer_wakeup_poll(indio_dev);
1996 mutex_unlock(&iio_dev_opaque->info_exist_lock);
1998 iio_buffers_free_sysfs_and_mask(indio_dev);
2000 EXPORT_SYMBOL(iio_device_unregister);
/* devres action callback: unregister the managed device on driver detach. */
2002 static void devm_iio_device_unreg(void *indio_dev)
2004 iio_device_unregister(indio_dev);
/*
 * Resource-managed __iio_device_register(): registration is automatically
 * undone (devm_iio_device_unreg) when @dev is detached.
 */
2007 int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
2008 struct module *this_mod)
2012 ret = __iio_device_register(indio_dev, this_mod);
2016 return devm_add_action_or_reset(dev, devm_iio_device_unreg, indio_dev);
2018 EXPORT_SYMBOL_GPL(__devm_iio_device_register);
2021 * iio_device_claim_direct_mode - Keep device in direct mode
2022 * @indio_dev: the iio_dev associated with the device
2024 * If the device is in direct mode it is guaranteed to stay
2025 * that way until iio_device_release_direct_mode() is called.
2027 * Use with iio_device_release_direct_mode()
2029 * Returns: 0 on success, -EBUSY on failure
 *
 * On success mlock is left held; the matching release() unlocks it.
2031 int iio_device_claim_direct_mode(struct iio_dev *indio_dev)
2033 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2035 mutex_lock(&iio_dev_opaque->mlock);
/* a running buffer means we are not in direct mode: back off */
2037 if (iio_buffer_enabled(indio_dev)) {
2038 mutex_unlock(&iio_dev_opaque->mlock);
2043 EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode);
2046 * iio_device_release_direct_mode - releases claim on direct mode
2047 * @indio_dev: the iio_dev associated with the device
2049 * Release the claim. Device is no longer guaranteed to stay
2052 * Use with iio_device_claim_direct_mode()
 *
 * Drops the mlock acquired by a successful claim.
2054 void iio_device_release_direct_mode(struct iio_dev *indio_dev)
2056 mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock);
2058 EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);
2061 * iio_device_claim_buffer_mode - Keep device in buffer mode
2062 * @indio_dev: the iio_dev associated with the device
2064 * If the device is in buffer mode it is guaranteed to stay
2065 * that way until iio_device_release_buffer_mode() is called.
2067 * Use with iio_device_release_buffer_mode().
2069 * Returns: 0 on success, -EBUSY on failure.
 *
 * Inverse of the direct-mode claim: succeeds (keeping mlock held) only
 * while a buffer is enabled.
2071 int iio_device_claim_buffer_mode(struct iio_dev *indio_dev)
2073 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2075 mutex_lock(&iio_dev_opaque->mlock);
2077 if (iio_buffer_enabled(indio_dev))
/* not in buffer mode: drop the lock and fail */
2080 mutex_unlock(&iio_dev_opaque->mlock);
2083 EXPORT_SYMBOL_GPL(iio_device_claim_buffer_mode);
2086 * iio_device_release_buffer_mode - releases claim on buffer mode
2087 * @indio_dev: the iio_dev associated with the device
2089 * Release the claim. Device is no longer guaranteed to stay
2092 * Use with iio_device_claim_buffer_mode().
 *
 * Drops the mlock acquired by a successful claim.
2094 void iio_device_release_buffer_mode(struct iio_dev *indio_dev)
2096 mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock);
2098 EXPORT_SYMBOL_GPL(iio_device_release_buffer_mode);
2101 * iio_device_get_current_mode() - helper function providing read-only access to
2102 * the opaque @currentmode variable
2103 * @indio_dev: IIO device structure for device
 *
 * Accessor so drivers can read the mode without touching iio_dev_opaque.
2105 int iio_device_get_current_mode(struct iio_dev *indio_dev)
2107 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2109 return iio_dev_opaque->currentmode;
2111 EXPORT_SYMBOL_GPL(iio_device_get_current_mode);
/* Register at subsys_initcall time so IIO drivers can probe at device_initcall. */
2113 subsys_initcall(iio_init);
2114 module_exit(iio_exit);
2117 MODULE_DESCRIPTION("Industrial I/O core");
2118 MODULE_LICENSE("GPL");