/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}

static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}

static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wake up if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && avail < to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf,
						to_flush - avail);
		return true;
	}

	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}
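
/*
 * Worked example of the readiness logic above (illustrative numbers, not
 * taken from any particular driver): with a watermark of 16 samples, a
 * blocking read of 64 samples gives to_wait = 16 and to_flush = 64.  If
 * only 10 samples are buffered, up to to_wait - avail = 6 samples are
 * pulled out of the hardware FIFO, and the reader is woken once
 * avail + flushed reaches 16.  A non-blocking read (to_wait == 0) instead
 * forces a flush of to_flush - avail samples so it can return whatever is
 * available immediately.
 */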

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		First n bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: negative values corresponding to error codes, or the number of
 *	   bytes read once the read completes
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	size_t datum_size;
	size_t to_wait;
	int ret = 0;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	if (filp->f_flags & O_NONBLOCK)
		to_wait = 0;
	else
		to_wait = min_t(size_t, n / datum_size, rb->watermark);

	add_wait_queue(&rb->pollq, &wait);
	do {
		if (!indio_dev->info) {
			ret = -ENODEV;
			break;
		}

		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = rb->access->read_first_n(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);
	remove_wait_queue(&rb->pollq, &wait);

	return ret;
}
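
/*
 * Sketch of how this read path is typically driven from userspace (a
 * hedged example; the device node name and buffer size are assumptions,
 * not taken from this file):
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	char scan[512];
 *	ssize_t n = read(fd, scan, sizeof(scan));
 *
 * A blocking read() sleeps until min(requested samples, watermark)
 * samples are buffered, then returns whole scans laid out according to
 * the enabled scan_elements; with O_NONBLOCK it fails with EAGAIN
 * instead of sleeping.
 */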

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
__poll_t iio_buffer_poll(struct file *filp,
			 struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info || rb == NULL)
		return 0;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}
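
/*
 * Illustrative userspace pairing for the poll support above (an assumed
 * example, not from this file): a reader can sleep until the watermark
 * is met before issuing a read().
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *
 * POLLIN | POLLRDNORM in userspace corresponds to the
 * EPOLLIN | EPOLLRDNORM mask returned by iio_buffer_poll().
 */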

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	if (!buffer->watermark)
		buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);

/**
 * iio_buffer_set_attrs - Set buffer specific attributes
 * @buffer: The buffer for which we are setting attributes
 * @attrs: Pointer to a null terminated list of pointers to attributes
 */
void iio_buffer_set_attrs(struct iio_buffer *buffer,
			  const struct attribute **attrs)
{
	buffer->attrs = attrs;
}
EXPORT_SYMBOL_GPL(iio_buffer_set_attrs);
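
/*
 * Hedged usage sketch for iio_buffer_set_attrs(); the attribute name is
 * invented for illustration.  A driver wanting an extra sysfs file under
 * the buffer/ directory supplies a NULL-terminated array before device
 * registration:
 *
 *	static const struct attribute *my_buffer_attrs[] = {
 *		&dev_attr_my_fifo_mode.attr,
 *		NULL,
 *	};
 *
 *	iio_buffer_set_attrs(buffer, my_buffer_attrs);
 *
 * The array is merged with the core attributes in
 * iio_buffer_alloc_sysfs_and_mask() below.
 */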

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
			       iio_endian_prefix[type],
			       this_attr->c->scan_type.sign,
			       this_attr->c->scan_type.realbits,
			       this_attr->c->scan_type.storagebits,
			       this_attr->c->scan_type.repeat,
			       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
			       iio_endian_prefix[type],
			       this_attr->c->scan_type.sign,
			       this_attr->c->scan_type.realbits,
			       this_attr->c->scan_type.storagebits,
			       this_attr->c->scan_type.shift);
}
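
/*
 * The string built above is what userspace sees in the *_type files under
 * scan_elements/.  For example (values chosen for illustration only), a
 * signed 12-bit little-endian value stored in 16 bits and shifted down by
 * 4 reads "le:s12/16>>4", and the same element with a repeat count of 2
 * reads "le:s12/16X2>>4".
 */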

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask,
						bool strict)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (strict) {
			if (bitmap_equal(mask, av_masks, masklength))
				return av_masks;
		} else {
			if (bitmap_subset(mask, av_masks, masklength))
				return av_masks;
		}
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
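
/*
 * Illustrative layout for available_scan_masks as consumed above (an
 * assumed example for a device with masklength <= BITS_PER_LONG): the
 * driver provides one long per allowed mask, terminated by an empty mask.
 *
 *	static const unsigned long my_scan_masks[] = {
 *		BIT(0) | BIT(1),		two-channel combination
 *		BIT(0) | BIT(1) | BIT(2),	superset combination
 *		0,				terminator
 *	};
 *
 * With strict == false a requested subset such as BIT(1) matches the
 * first entry; with strict == true only a bit-for-bit equal mask matches.
 */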

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
			    sizeof(*trialmask), GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask, false);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	bitmap_free(trialmask);

	return 0;

err_invalid_mask:
	bitmap_free(trialmask);
	return -EINVAL;
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static int iio_scan_mask_query(struct iio_dev *indio_dev,
			       struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}

static ssize_t iio_buffer_read_length(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->length);
}

static ssize_t iio_buffer_write_length(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	if (ret)
		goto out;
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_buffer_show_enable(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}

static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
					     unsigned int scan_index)
{
	const struct iio_chan_spec *ch;
	unsigned int bytes;

	ch = iio_find_channel_from_si(indio_dev, scan_index);
	bytes = ch->scan_type.storagebits / 8;
	if (ch->scan_type.repeat > 1)
		bytes *= ch->scan_type.repeat;
	return bytes;
}

static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{
	return iio_storage_bytes_for_si(indio_dev,
					indio_dev->scan_index_timestamp);
}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		length = iio_storage_bytes_for_si(indio_dev, i);
		bytes = ALIGN(bytes, length);
		bytes += length;
	}

	if (timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
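
/*
 * Worked example for the computation above, with an assumed channel
 * layout: two 2-byte channels plus an 8-byte timestamp give
 *
 *	bytes = ALIGN(0, 2) + 2 = 2		after channel 0
 *	bytes = ALIGN(2, 2) + 2 = 4		after channel 1
 *	bytes = ALIGN(4, 8) + 8 = 16		after the timestamp
 *
 * i.e. 4 bytes of padding are inserted so the s64 timestamp stays
 * naturally aligned within every scan.
 */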

static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}

static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);
}

static int iio_buffer_enable(struct iio_buffer *buffer,
			     struct iio_dev *indio_dev)
{
	if (!buffer->access->enable)
		return 0;
	return buffer->access->enable(buffer, indio_dev);
}

static int iio_buffer_disable(struct iio_buffer *buffer,
			      struct iio_dev *indio_dev)
{
	if (!buffer->access->disable)
		return 0;
	return buffer->access->disable(buffer, indio_dev);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int iio_buffer_request_update(struct iio_dev *indio_dev,
				     struct iio_buffer *buffer)
{
	int ret;

	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
	if (buffer->access->request_update) {
		ret = buffer->access->request_update(buffer);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer parameter update failed (%d)\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void iio_free_scan_mask(struct iio_dev *indio_dev,
			       const unsigned long *mask)
{
	/* If the mask is dynamically allocated free it, otherwise do nothing */
	if (!indio_dev->available_scan_masks)
		bitmap_free(mask);
}

struct iio_device_config {
	unsigned int mode;
	unsigned int watermark;
	const unsigned long *scan_mask;
	unsigned int scan_bytes;
	bool scan_timestamp;
};

static int iio_verify_update(struct iio_dev *indio_dev,
	struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
	struct iio_device_config *config)
{
	unsigned long *compound_mask;
	const unsigned long *scan_mask;
	bool strict_scanmask = false;
	struct iio_buffer *buffer;
	bool scan_timestamp;
	unsigned int modes;

	memset(config, 0, sizeof(*config));
	config->watermark = ~0;

	/*
	 * If there is just one buffer and we are removing it there is nothing
	 * to verify.
	 */
	if (remove_buffer && !insert_buffer &&
	    list_is_singular(&indio_dev->buffer_list))
		return 0;

	modes = indio_dev->modes;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		modes &= buffer->access->modes;
		config->watermark = min(config->watermark, buffer->watermark);
	}

	if (insert_buffer) {
		modes &= insert_buffer->access->modes;
		config->watermark = min(config->watermark,
					insert_buffer->watermark);
	}

	/* Definitely possible for devices to support both of these. */
	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		config->mode = INDIO_BUFFER_TRIGGERED;
	} else if (modes & INDIO_BUFFER_HARDWARE) {
		/*
		 * Keep things simple for now and only allow a single buffer to
		 * be connected in hardware mode.
		 */
		if (insert_buffer && !list_empty(&indio_dev->buffer_list))
			return -EINVAL;
		config->mode = INDIO_BUFFER_HARDWARE;
		strict_scanmask = true;
	} else if (modes & INDIO_BUFFER_SOFTWARE) {
		config->mode = INDIO_BUFFER_SOFTWARE;
	} else {
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
		return -EINVAL;
	}

	/* What scan mask do we actually have? */
	compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
	if (compound_mask == NULL)
		return -ENOMEM;

	scan_timestamp = false;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		scan_timestamp |= buffer->scan_timestamp;
	}

	if (insert_buffer) {
		bitmap_or(compound_mask, compound_mask,
			  insert_buffer->scan_mask, indio_dev->masklength);
		scan_timestamp |= insert_buffer->scan_timestamp;
	}

	if (indio_dev->available_scan_masks) {
		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
						indio_dev->masklength,
						compound_mask,
						strict_scanmask);
		bitmap_free(compound_mask);
		if (scan_mask == NULL)
			return -EINVAL;
	} else {
		scan_mask = compound_mask;
	}

	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
						    scan_mask, scan_timestamp);
	config->scan_mask = scan_mask;
	config->scan_timestamp = scan_timestamp;

	return 0;
}

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

static int iio_buffer_add_demux(struct iio_buffer *buffer,
	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	if (*p && (*p)->from + (*p)->length == in_loc &&
	    (*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (*p == NULL)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			length = iio_storage_bytes_for_si(indio_dev, in_ind);
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
		}
		length = iio_storage_bytes_for_si(indio_dev, in_ind);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
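
/*
 * Demux example with hypothetical masks, for illustration only: if the
 * device captures with active_scan_mask covering elements {0, 1, 2} but
 * this buffer only requested elements {0, 2}, the loop above emits copy
 * ops that skip element 1 in each incoming scan and pack elements 0 and 2
 * (subject to per-element alignment) into demux_bounce.  Adjacent copies
 * are merged by iio_buffer_add_demux(), so contiguous selections collapse
 * into a single memcpy at push time.
 */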

static int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}

static int iio_enable_buffers(struct iio_dev *indio_dev,
			      struct iio_device_config *config)
{
	struct iio_buffer *buffer;
	int ret;

	indio_dev->active_scan_mask = config->scan_mask;
	indio_dev->scan_timestamp = config->scan_timestamp;
	indio_dev->scan_bytes = config->scan_bytes;

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer preenable failed (%d)\n", ret);
			goto err_undo_config;
		}
	}

	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: update scan mode failed (%d)\n",
				ret);
			goto err_run_postdisable;
		}
	}

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev,
			config->watermark);

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_enable(buffer, indio_dev);
		if (ret)
			goto err_disable_buffers;
	}

	indio_dev->currentmode = config->mode;

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: postenable failed (%d)\n", ret);
			goto err_disable_buffers;
		}
	}

	return 0;

err_disable_buffers:
	list_for_each_entry_continue_reverse(buffer, &indio_dev->buffer_list,
					     buffer_list)
		iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
	indio_dev->active_scan_mask = NULL;

	return ret;
}

static int iio_disable_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret = 0;
	int ret2;

	/* Wind down existing buffers - iff there are any */
	if (list_empty(&indio_dev->buffer_list))
		return 0;

	/*
	 * If things go wrong at some step in disable we still need to continue
	 * to perform the other steps, otherwise we leave the device in an
	 * inconsistent state. We return the error code for the first error we
	 * encountered.
	 */

	if (indio_dev->setup_ops->predisable) {
		ret2 = indio_dev->setup_ops->predisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret2 = iio_buffer_disable(buffer, indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	indio_dev->currentmode = INDIO_DIRECT_MODE;

	if (indio_dev->setup_ops->postdisable) {
		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
	indio_dev->active_scan_mask = NULL;

	return ret;
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	struct iio_device_config new_config;
	int ret;

	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
				&new_config);
	if (ret)
		return ret;

	if (insert_buffer) {
		ret = iio_buffer_request_update(indio_dev, insert_buffer);
		if (ret)
			goto err_free_config;
	}

	ret = iio_disable_buffers(indio_dev);
	if (ret)
		goto err_deactivate_all;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list))
		return 0;

	ret = iio_enable_buffers(indio_dev, &new_config);
	if (ret)
		goto err_deactivate_all;

	return 0;

err_deactivate_all:
	/*
	 * We've already verified that the config is valid earlier. If things go
	 * wrong in either enable or disable the most likely reason is an IO
	 * error from the device. In this case there is no good recovery
	 * strategy. Just make sure to disable everything and leave the device
	 * in a sane state.  With a bit of luck the device might come back to
	 * life again later and userspace can try again.
	 */
	iio_buffer_deactivate_all(indio_dev);

err_free_config:
	iio_free_scan_mask(indio_dev, new_config.scan_mask);
	return ret;
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
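
/*
 * Hedged in-kernel usage sketch (the buffer name is assumed): a consumer
 * such as a callback buffer attaches its own buffer alongside any
 * userspace one and detaches it again when done.
 *
 *	ret = iio_update_buffers(indio_dev, my_buffer, NULL);	start
 *	...
 *	ret = iio_update_buffers(indio_dev, NULL, my_buffer);	stop
 *
 * Passing the same buffer as insert and remove is a no-op, and inserting
 * an already active buffer (or removing an inactive one) is silently
 * ignored, as the checks above show.
 */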

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	iio_disable_buffers(indio_dev);
	iio_buffer_deactivate_all(indio_dev);
}

static ssize_t iio_buffer_store_enable(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					   indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					   NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

static ssize_t iio_buffer_show_watermark(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%u\n", buffer->watermark);
}

static ssize_t iio_buffer_store_watermark(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;
	if (!val)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);

	if (val > buffer->length) {
		ret = -EINVAL;
		goto out;
	}

	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto out;
	}

	buffer->watermark = val;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_dma_show_data_available(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	size_t bytes;

	bytes = iio_buffer_data_available(indio_dev->buffer);

	return sprintf(buf, "%zu\n", bytes);
}

static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
		   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
	S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		   iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
		   iio_buffer_show_watermark, iio_buffer_store_watermark);
static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
	S_IRUGO, iio_buffer_show_watermark, NULL);
static DEVICE_ATTR(data_available, S_IRUGO,
		   iio_dma_show_data_available, NULL);

static struct attribute *iio_buffer_attrs[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&dev_attr_watermark.attr,
	&dev_attr_data_available.attr,
};

int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;
	const struct iio_chan_spec *channels;

	channels = indio_dev->channels;
	if (channels) {
		int ml = indio_dev->masklength;

		for (i = 0; i < indio_dev->num_channels; i++)
			ml = max(ml, channels[i].scan_index + 1);
		indio_dev->masklength = ml;
	}

	if (!buffer)
		return 0;

	attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[attrcount] != NULL)
			attrcount++;
	}

	attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
		       sizeof(struct attribute *), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
	if (!buffer->access->set_length)
		attr[0] = &dev_attr_length_ro.attr;

	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
		attr[2] = &dev_attr_watermark_ro.attr;

	if (buffer->attrs)
		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
		       sizeof(struct attribute *) * attrcount);

	attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;

	buffer->buffer_group.name = "buffer";
	buffer->buffer_group.attrs = attr;

	indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		/* new magic */
		for (i = 0; i < indio_dev->num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
							  GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0]) * attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	bitmap_free(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
	kfree(indio_dev->buffer->buffer_group.attrs);

	return ret;
}

void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	bitmap_free(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->buffer_group.attrs);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
1368 | ||
5d65d920 LPC |
1369 | static const void *iio_demux(struct iio_buffer *buffer, |
1370 | const void *datain) | |
5ada4ea9 JC |
1371 | { |
1372 | struct iio_demux_table *t; | |
1373 | ||
1374 | if (list_empty(&buffer->demux_list)) | |
1375 | return datain; | |
1376 | list_for_each_entry(t, &buffer->demux_list, l) | |
1377 | memcpy(buffer->demux_bounce + t->to, | |
1378 | datain + t->from, t->length); | |
1379 | ||
1380 | return buffer->demux_bounce; | |
1381 | } | |
1382 | ||
5d65d920 | 1383 | static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data) |
5ada4ea9 | 1384 | { |
5d65d920 | 1385 | const void *dataout = iio_demux(buffer, data); |
37d34556 JC |
1386 | int ret; |
1387 | ||
1388 | ret = buffer->access->store_to(buffer, dataout); | |
1389 | if (ret) | |
1390 | return ret; | |
5ada4ea9 | 1391 | |
37d34556 JC |
1392 | /* |
1393 | * We can't just test for watermark to decide if we wake the poll queue | |
1394 | * because read may request less samples than the watermark. | |
1395 | */ | |
a9a08845 | 1396 | wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM); |
37d34556 | 1397 | return 0; |
5ada4ea9 | 1398 | } |
5ada4ea9 | 1399 | |
315a19ec JC |
1400 | /** |
1401 | * iio_push_to_buffers() - push to a registered buffer. | |
1402 | * @indio_dev: iio_dev structure for device. | |
1403 | * @data: Full scan. | |
1404 | */ | |
5d65d920 | 1405 | int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data) |
84b36ce5 JC |
1406 | { |
1407 | int ret; | |
1408 | struct iio_buffer *buf; | |
1409 | ||
1410 | list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) { | |
1411 | ret = iio_push_to_buffer(buf, data); | |
1412 | if (ret < 0) | |
1413 | return ret; | |
1414 | } | |
1415 | ||
1416 | return 0; | |
1417 | } | |
1418 | EXPORT_SYMBOL_GPL(iio_push_to_buffers); | |
1419 | ||
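
/*
 * Hedged driver-side sketch (the handler and fill function are
 * assumptions): a triggered-buffer bottom half fills one scan and pushes
 * it to every attached buffer.
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *
 *		my_fill_scan(indio_dev, my_scan);
 *		iio_push_to_buffers(indio_dev, my_scan);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 *
 * Many drivers use the iio_push_to_buffers_with_timestamp() wrapper,
 * which appends the s64 timestamp to the scan before calling in here.
 */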

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);

/**
 * iio_device_attach_buffer - Attach a buffer to an IIO device
 * @indio_dev: The device the buffer should be attached to
 * @buffer: The buffer to attach to the device
 *
 * This function attaches a buffer to an IIO device. The buffer stays attached
 * to the device until the device is freed. The function should only be called
 * at most once per device.
 */
void iio_device_attach_buffer(struct iio_dev *indio_dev,
			      struct iio_buffer *buffer)
{
	indio_dev->buffer = iio_buffer_get(buffer);
}
EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
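
/*
 * Hedged probe-time sketch, assuming the kfifo buffer implementation from
 * drivers/iio/buffer/kfifo_buf.c:
 *
 *	buffer = iio_kfifo_allocate();
 *	if (!buffer)
 *		return -ENOMEM;
 *	iio_device_attach_buffer(indio_dev, buffer);
 *	indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
 *
 * The reference taken here is dropped when the IIO device itself is
 * freed; the driver releases its own allocation reference separately
 * (for kfifo buffers via iio_kfifo_free() on remove).
 */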