1 // SPDX-License-Identifier: GPL-2.0
3 #include <linux/anon_inodes.h>
4 #include <linux/atomic.h>
5 #include <linux/bitmap.h>
6 #include <linux/build_bug.h>
7 #include <linux/cdev.h>
8 #include <linux/compat.h>
9 #include <linux/compiler.h>
10 #include <linux/device.h>
11 #include <linux/err.h>
12 #include <linux/file.h>
13 #include <linux/gpio.h>
14 #include <linux/gpio/driver.h>
15 #include <linux/hte.h>
16 #include <linux/interrupt.h>
17 #include <linux/irqreturn.h>
18 #include <linux/kernel.h>
19 #include <linux/kfifo.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/pinctrl/consumer.h>
23 #include <linux/poll.h>
24 #include <linux/seq_file.h>
25 #include <linux/spinlock.h>
26 #include <linux/timekeeping.h>
27 #include <linux/uaccess.h>
28 #include <linux/workqueue.h>
30 #include <uapi/linux/gpio.h>
33 #include "gpiolib-cdev.h"
35 /*
36 * Array sizes must ensure 64-bit alignment and not create holes in the
37 * struct packing.
38 */
39 static_assert(IS_ALIGNED(GPIO_V2_LINES_MAX, 2));
40 static_assert(IS_ALIGNED(GPIO_MAX_NAME_SIZE, 8));
42 /*
43 * Check that uAPI structs are 64-bit aligned for 32/64-bit compatibility
44 */
45 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_attribute), 8));
46 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config_attribute), 8));
47 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config), 8));
48 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_request), 8));
49 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info), 8));
50 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info_changed), 8));
51 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_event), 8));
52 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));
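/*
 * For example (constants taken from the uAPI header, shown for illustration
 * only): GPIO_V2_LINES_MAX is 64 and GPIO_MAX_NAME_SIZE is 32, so arrays such
 * as __u32 offsets[GPIO_V2_LINES_MAX] or char name[GPIO_MAX_NAME_SIZE] occupy
 * a multiple of 8 bytes and keep any following __aligned_u64 member at the
 * same offset for 32-bit and 64-bit userspace.
 */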
54 /*
55 * Character device interface to GPIO.
56 *
57 * The GPIO character device, /dev/gpiochipN, provides userspace an
58 * interface to gpiolib GPIOs via ioctl()s.
59 */
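/*
 * A minimal userspace sketch of the v2 flow, for illustration only (error
 * handling omitted; the chip path and line offset are arbitrary examples):
 *
 *	struct gpio_v2_line_request req = { 0 };
 *	int chip_fd = open("/dev/gpiochip0", O_RDONLY);
 *
 *	req.offsets[0] = 3;
 *	req.num_lines = 1;
 *	req.config.flags = GPIO_V2_LINE_FLAG_INPUT;
 *	strcpy(req.consumer, "example-consumer");
 *	ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req);
 *	// req.fd now refers to an anonymous file representing the line
 *	// request and accepting the GPIO_V2_LINE_* ioctl()s handled below.
 */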
60 typedef __poll_t (*poll_fn)(struct file *, struct poll_table_struct *);
61 typedef long (*ioctl_fn)(struct file *, unsigned int, unsigned long);
62 typedef ssize_t (*read_fn)(struct file *, char __user *,
63 size_t count, loff_t *);
65 static __poll_t call_poll_locked(struct file *file,
66 struct poll_table_struct *wait,
67 struct gpio_device *gdev, poll_fn func)
71 down_read(&gdev->sem);
72 ret = func(file, wait);
78 static long call_ioctl_locked(struct file *file, unsigned int cmd,
79 unsigned long arg, struct gpio_device *gdev,
84 down_read(&gdev->sem);
85 ret = func(file, cmd, arg);
91 static ssize_t call_read_locked(struct file *file, char __user *buf,
92 size_t count, loff_t *f_ps,
93 struct gpio_device *gdev, read_fn func)
97 down_read(&gdev->sem);
98 ret = func(file, buf, count, f_ps);
104 /*
105 * GPIO line handle management
106 */
108 #ifdef CONFIG_GPIO_CDEV_V1
109 /**
110 * struct linehandle_state - contains the state of a userspace handle
111 * @gdev: the GPIO device the handle pertains to
112 * @label: consumer label used to tag descriptors
113 * @descs: the GPIO descriptors held by this handle
114 * @num_descs: the number of descriptors held in the descs array
115 */
116 struct linehandle_state {
117 struct gpio_device *gdev;
118 const char *label;
119 struct gpio_desc *descs[GPIOHANDLES_MAX];
120 u32 num_descs;
121 };
123 #define GPIOHANDLE_REQUEST_VALID_FLAGS \
124 (GPIOHANDLE_REQUEST_INPUT | \
125 GPIOHANDLE_REQUEST_OUTPUT | \
126 GPIOHANDLE_REQUEST_ACTIVE_LOW | \
127 GPIOHANDLE_REQUEST_BIAS_PULL_UP | \
128 GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \
129 GPIOHANDLE_REQUEST_BIAS_DISABLE | \
130 GPIOHANDLE_REQUEST_OPEN_DRAIN | \
131 GPIOHANDLE_REQUEST_OPEN_SOURCE)
133 static int linehandle_validate_flags(u32 flags)
135 /* Return an error if an unknown flag is set */
136 if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
137 return -EINVAL;
139 /*
140 * Do not allow both INPUT & OUTPUT flags to be set as they are
141 * contradictory.
142 */
143 if ((flags & GPIOHANDLE_REQUEST_INPUT) &&
144 (flags & GPIOHANDLE_REQUEST_OUTPUT))
145 return -EINVAL;
147 /*
148 * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
149 * the hardware actually supports enabling both at the same time the
150 * electrical result would be disastrous.
151 */
152 if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
153 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
154 return -EINVAL;
156 /* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
157 if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
158 ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
159 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
160 return -EINVAL;
162 /* Bias flags only allowed for input or output mode. */
163 if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
164 (flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
165 ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
166 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
167 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
168 return -EINVAL;
170 /* Only one bias flag can be set. */
171 if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
172 (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
173 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
174 ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
175 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
176 return -EINVAL;
178 return 0;
179 }
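/*
 * Illustrative outcomes of the checks above (GPIOHANDLE_REQUEST_ prefixes
 * dropped; not an exhaustive list):
 *
 *	INPUT | BIAS_PULL_UP		-> 0 (valid)
 *	INPUT | OUTPUT			-> -EINVAL (contradictory directions)
 *	INPUT | OPEN_DRAIN		-> -EINVAL (drive flags need OUTPUT)
 *	BIAS_PULL_UP | BIAS_PULL_DOWN	-> -EINVAL (more than one bias flag)
 */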
181 static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
183 assign_bit(FLAG_ACTIVE_LOW, flagsp,
184 lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
185 assign_bit(FLAG_OPEN_DRAIN, flagsp,
186 lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
187 assign_bit(FLAG_OPEN_SOURCE, flagsp,
188 lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
189 assign_bit(FLAG_PULL_UP, flagsp,
190 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
191 assign_bit(FLAG_PULL_DOWN, flagsp,
192 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
193 assign_bit(FLAG_BIAS_DISABLE, flagsp,
194 lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
197 static long linehandle_set_config(struct linehandle_state *lh,
200 struct gpiohandle_config gcnf;
201 struct gpio_desc *desc;
205 if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
209 ret = linehandle_validate_flags(lflags);
213 for (i = 0; i < lh->num_descs; i++) {
215 linehandle_flags_to_desc_flags(gcnf.flags, &desc->flags);
217 /*
218 * Lines have to be requested explicitly for input
219 * or output, else the line will be treated "as is".
220 */
221 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
222 int val = !!gcnf.default_values[i];
224 ret = gpiod_direction_output(desc, val);
227 } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
228 ret = gpiod_direction_input(desc);
233 blocking_notifier_call_chain(&desc->gdev->notifier,
234 GPIO_V2_LINE_CHANGED_CONFIG,
240 static long linehandle_ioctl_unlocked(struct file *file, unsigned int cmd,
243 struct linehandle_state *lh = file->private_data;
244 void __user *ip = (void __user *)arg;
245 struct gpiohandle_data ghd;
246 DECLARE_BITMAP(vals, GPIOHANDLES_MAX);
254 case GPIOHANDLE_GET_LINE_VALUES_IOCTL:
255 /* NOTE: It's okay to read values of output lines */
256 ret = gpiod_get_array_value_complex(false, true,
257 lh->num_descs, lh->descs,
262 memset(&ghd, 0, sizeof(ghd));
263 for (i = 0; i < lh->num_descs; i++)
264 ghd.values[i] = test_bit(i, vals);
266 if (copy_to_user(ip, &ghd, sizeof(ghd)))
270 case GPIOHANDLE_SET_LINE_VALUES_IOCTL:
271 /*
272 * All line descriptors were created at once with the same
273 * flags so just check if the first one is really output.
274 */
275 if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
278 if (copy_from_user(&ghd, ip, sizeof(ghd)))
281 /* Clamp all values to [0,1] */
282 for (i = 0; i < lh->num_descs; i++)
283 __assign_bit(i, vals, ghd.values[i]);
285 /* Reuse the array setting function */
286 return gpiod_set_array_value_complex(false,
292 case GPIOHANDLE_SET_CONFIG_IOCTL:
293 return linehandle_set_config(lh, ip);
299 static long linehandle_ioctl(struct file *file, unsigned int cmd,
302 struct linehandle_state *lh = file->private_data;
304 return call_ioctl_locked(file, cmd, arg, lh->gdev,
305 linehandle_ioctl_unlocked);
309 static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
312 return linehandle_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
316 static void linehandle_free(struct linehandle_state *lh)
320 for (i = 0; i < lh->num_descs; i++)
322 gpiod_free(lh->descs[i]);
324 put_device(&lh->gdev->dev);
328 static int linehandle_release(struct inode *inode, struct file *file)
330 linehandle_free(file->private_data);
334 static const struct file_operations linehandle_fileops = {
335 .release = linehandle_release,
336 .owner = THIS_MODULE,
337 .llseek = noop_llseek,
338 .unlocked_ioctl = linehandle_ioctl,
340 .compat_ioctl = linehandle_ioctl_compat,
344 static int linehandle_create(struct gpio_device *gdev, void __user *ip)
346 struct gpiohandle_request handlereq;
347 struct linehandle_state *lh;
352 if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
354 if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
357 lflags = handlereq.flags;
359 ret = linehandle_validate_flags(lflags);
363 lh = kzalloc(sizeof(*lh), GFP_KERNEL);
367 get_device(&gdev->dev);
369 if (handlereq.consumer_label[0] != '\0') {
370 /* label is only initialized if consumer_label is set */
371 lh->label = kstrndup(handlereq.consumer_label,
372 sizeof(handlereq.consumer_label) - 1,
380 lh->num_descs = handlereq.lines;
382 /* Request each GPIO */
383 for (i = 0; i < handlereq.lines; i++) {
384 u32 offset = handlereq.lineoffsets[i];
385 struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);
392 ret = gpiod_request_user(desc, lh->label);
396 linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags);
398 ret = gpiod_set_transitory(desc, false);
402 /*
403 * Lines have to be requested explicitly for input
404 * or output, else the line will be treated "as is".
405 */
406 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
407 int val = !!handlereq.default_values[i];
409 ret = gpiod_direction_output(desc, val);
412 } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
413 ret = gpiod_direction_input(desc);
418 blocking_notifier_call_chain(&desc->gdev->notifier,
419 GPIO_V2_LINE_CHANGED_REQUESTED, desc);
421 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
422 offset);
423 }
425 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
431 file = anon_inode_getfile("gpio-linehandle",
434 O_RDONLY | O_CLOEXEC);
437 goto out_put_unused_fd;
441 if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
442 /*
443 * fput() will trigger the release() callback, so do not go onto
444 * the regular error cleanup path here.
445 */
451 fd_install(fd, file);
453 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
454 lh->num_descs);
464 #endif /* CONFIG_GPIO_CDEV_V1 */
466 /**
467 * struct line - contains the state of a requested line
468 * @desc: the GPIO descriptor for this line.
469 * @req: the corresponding line request
470 * @irq: the interrupt triggered in response to events on this GPIO
471 * @edflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or
472 * GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied
473 * @timestamp_ns: cache for the timestamp storing it between hardirq and
474 * IRQ thread, used to bring the timestamp close to the actual event
475 * @req_seqno: the seqno for the current edge event in the sequence of
476 * events for the corresponding line request. This is drawn from the @req.
477 * @line_seqno: the seqno for the current edge event in the sequence of
478 * events for this line.
479 * @work: the worker that implements software debouncing
480 * @sw_debounced: flag indicating if the software debouncer is active
481 * @level: the current debounced physical level of the line
482 * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
483 * @raw_level: the line level at the time of event
484 * @total_discard_seq: the running counter of the discarded events
485 * @last_seqno: the last sequence number before debounce period expires
486 */
487 struct line {
488 struct gpio_desc *desc;
489 /*
490 * -- edge detector specific fields --
491 */
492 struct linereq *req;
493 unsigned int irq;
494 /*
495 * The flags for the active edge detector configuration.
497 * edflags is set by linereq_create(), linereq_free(), and
498 * linereq_set_config_unlocked(), which are themselves mutually
499 * exclusive, and is accessed by edge_irq_thread(),
500 * process_hw_ts_thread() and debounce_work_func(),
501 * which can all live with a slightly stale value.
502 */
503 u64 edflags;
504 /*
505 * timestamp_ns and req_seqno are accessed only by
506 * edge_irq_handler() and edge_irq_thread(), which are themselves
507 * mutually exclusive, so no additional protection is necessary.
508 */
509 u64 timestamp_ns;
510 u32 req_seqno;
511 /*
512 * line_seqno is accessed by either edge_irq_thread() or
513 * debounce_work_func(), which are themselves mutually exclusive,
514 * so no additional protection is necessary.
515 */
516 u32 line_seqno;
517 /*
518 * -- debouncer specific fields --
519 */
520 struct delayed_work work;
521 /*
522 * sw_debounce is accessed by linereq_set_config(), which is the
523 * only setter, and linereq_get_values(), which can live with a
524 * slightly stale value.
525 */
526 unsigned int sw_debounced;
527 /*
528 * level is accessed by debounce_work_func(), which is the only
529 * setter, and linereq_get_values() which can live with a slightly
530 * stale value.
531 */
532 unsigned int level;
533 #ifdef CONFIG_HTE
534 struct hte_ts_desc hdesc;
535 /*
536 * HTE provider sets line level at the time of event. The valid
537 * value is 0 or 1 and negative value for an error.
538 */
539 int raw_level;
540 /*
541 * when sw_debounce is set on an HTE enabled line, this is the running
542 * counter of the discarded events.
543 */
544 u32 total_discard_seq;
545 /*
546 * when sw_debounce is set on an HTE enabled line, this variable records
547 * the last sequence number before the debounce period expires.
548 */
549 u32 last_seqno;
550 #endif /* CONFIG_HTE */
551 };
553 /**
554 * struct linereq - contains the state of a userspace line request
555 * @gdev: the GPIO device the line request pertains to
556 * @label: consumer label used to tag GPIO descriptors
557 * @num_lines: the number of lines in the lines array
558 * @wait: wait queue that handles blocking reads of events
559 * @event_buffer_size: the number of elements allocated in @events
560 * @events: KFIFO for the GPIO events
561 * @seqno: the sequence number for edge events generated on all lines in
562 * this line request. Note that this is not used when @num_lines is 1, as
563 * the line_seqno is then the same and is cheaper to calculate.
564 * @config_mutex: mutex for serializing ioctl() calls to ensure consistency
565 * of configuration, particularly multi-step accesses to desc flags.
566 * @lines: the lines held by this line request, with @num_lines elements.
567 */
568 struct linereq {
569 struct gpio_device *gdev;
570 const char *label;
571 u32 num_lines;
572 wait_queue_head_t wait;
573 u32 event_buffer_size;
574 DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event);
575 atomic_t seqno;
576 struct mutex config_mutex;
577 struct line lines[];
578 };
580 #define GPIO_V2_LINE_BIAS_FLAGS \
581 (GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
582 GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
583 GPIO_V2_LINE_FLAG_BIAS_DISABLED)
585 #define GPIO_V2_LINE_DIRECTION_FLAGS \
586 (GPIO_V2_LINE_FLAG_INPUT | \
587 GPIO_V2_LINE_FLAG_OUTPUT)
589 #define GPIO_V2_LINE_DRIVE_FLAGS \
590 (GPIO_V2_LINE_FLAG_OPEN_DRAIN | \
591 GPIO_V2_LINE_FLAG_OPEN_SOURCE)
593 #define GPIO_V2_LINE_EDGE_FLAGS \
594 (GPIO_V2_LINE_FLAG_EDGE_RISING | \
595 GPIO_V2_LINE_FLAG_EDGE_FALLING)
597 #define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS
599 #define GPIO_V2_LINE_VALID_FLAGS \
600 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
601 GPIO_V2_LINE_DIRECTION_FLAGS | \
602 GPIO_V2_LINE_DRIVE_FLAGS | \
603 GPIO_V2_LINE_EDGE_FLAGS | \
604 GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
605 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
606 GPIO_V2_LINE_BIAS_FLAGS)
608 /* subset of flags relevant for edge detector configuration */
609 #define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \
610 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
611 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
612 GPIO_V2_LINE_EDGE_FLAGS)
614 static void linereq_put_event(struct linereq *lr,
615 struct gpio_v2_line_event *le)
617 bool overflow = false;
619 spin_lock(&lr->wait.lock);
620 if (kfifo_is_full(&lr->events)) {
622 kfifo_skip(&lr->events);
624 kfifo_in(&lr->events, le, 1);
625 spin_unlock(&lr->wait.lock);
627 wake_up_poll(&lr->wait, EPOLLIN);
629 pr_debug_ratelimited("event FIFO is full - event dropped\n");
632 static u64 line_event_timestamp(struct line *line)
634 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
635 return ktime_get_real_ns();
636 else if (IS_ENABLED(CONFIG_HTE) &&
637 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
638 return line->timestamp_ns;
640 return ktime_get_ns();
643 static u32 line_event_id(int level)
645 return level ? GPIO_V2_LINE_EVENT_RISING_EDGE :
646 GPIO_V2_LINE_EVENT_FALLING_EDGE;
651 static enum hte_return process_hw_ts_thread(void *p)
655 struct gpio_v2_line_event le;
660 return HTE_CB_HANDLED;
665 memset(&le, 0, sizeof(le));
667 le.timestamp_ns = line->timestamp_ns;
668 edflags = READ_ONCE(line->edflags);
670 switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) {
671 case GPIO_V2_LINE_FLAG_EDGE_BOTH:
672 level = (line->raw_level >= 0) ?
674 gpiod_get_raw_value_cansleep(line->desc);
676 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
679 le.id = line_event_id(level);
681 case GPIO_V2_LINE_FLAG_EDGE_RISING:
682 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
684 case GPIO_V2_LINE_FLAG_EDGE_FALLING:
685 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
688 return HTE_CB_HANDLED;
690 le.line_seqno = line->line_seqno;
691 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
692 le.offset = gpio_chip_hwgpio(line->desc);
694 linereq_put_event(lr, &le);
696 return HTE_CB_HANDLED;
699 static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
706 return HTE_CB_HANDLED;
709 line->timestamp_ns = ts->tsc;
710 line->raw_level = ts->raw_level;
713 if (READ_ONCE(line->sw_debounced)) {
714 line->total_discard_seq++;
715 line->last_seqno = ts->seq;
716 mod_delayed_work(system_wq, &line->work,
717 usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
719 if (unlikely(ts->seq < line->line_seqno))
720 return HTE_CB_HANDLED;
722 diff_seqno = ts->seq - line->line_seqno;
723 line->line_seqno = ts->seq;
724 if (lr->num_lines != 1)
725 line->req_seqno = atomic_add_return(diff_seqno,
728 return HTE_RUN_SECOND_CB;
731 return HTE_CB_HANDLED;
734 static int hte_edge_setup(struct line *line, u64 eflags)
737 unsigned long flags = 0;
738 struct hte_ts_desc *hdesc = &line->hdesc;
740 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
741 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
742 HTE_FALLING_EDGE_TS :
744 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
745 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
749 line->total_discard_seq = 0;
751 hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL,
754 ret = hte_ts_get(NULL, hdesc, 0);
758 return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread,
764 static int hte_edge_setup(struct line *line, u64 eflags)
768 #endif /* CONFIG_HTE */
770 static irqreturn_t edge_irq_thread(int irq, void *p)
772 struct line *line = p;
773 struct linereq *lr = line->req;
774 struct gpio_v2_line_event le;
776 /* Do not leak kernel stack to userspace */
777 memset(&le, 0, sizeof(le));
779 if (line->timestamp_ns) {
780 le.timestamp_ns = line->timestamp_ns;
781 } else {
782 /*
783 * We may be running from a nested threaded interrupt in
784 * which case we didn't get the timestamp from
785 * edge_irq_handler().
786 */
787 le.timestamp_ns = line_event_timestamp(line);
788 if (lr->num_lines != 1)
789 line->req_seqno = atomic_inc_return(&lr->seqno);
791 line->timestamp_ns = 0;
793 switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) {
794 case GPIO_V2_LINE_FLAG_EDGE_BOTH:
795 le.id = line_event_id(gpiod_get_value_cansleep(line->desc));
797 case GPIO_V2_LINE_FLAG_EDGE_RISING:
798 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
800 case GPIO_V2_LINE_FLAG_EDGE_FALLING:
801 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
807 le.line_seqno = line->line_seqno;
808 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
809 le.offset = gpio_chip_hwgpio(line->desc);
811 linereq_put_event(lr, &le);
816 static irqreturn_t edge_irq_handler(int irq, void *p)
818 struct line *line = p;
819 struct linereq *lr = line->req;
821 /*
822 * Just store the timestamp in hardirq context so we get it as
823 * close in time as possible to the actual event.
824 */
825 line->timestamp_ns = line_event_timestamp(line);
827 if (lr->num_lines != 1)
828 line->req_seqno = atomic_inc_return(&lr->seqno);
830 return IRQ_WAKE_THREAD;
831 }
833 /*
834 * returns the current debounced logical value.
835 */
836 static bool debounced_value(struct line *line)
840 /*
841 * minor race - debouncer may be stopped here, so edge_detector_stop()
842 * must leave the value unchanged so the following will read the level
843 * from when the debouncer was last running.
844 */
845 value = READ_ONCE(line->level);
847 if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
853 static irqreturn_t debounce_irq_handler(int irq, void *p)
855 struct line *line = p;
857 mod_delayed_work(system_wq, &line->work,
858 usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
863 static void debounce_work_func(struct work_struct *work)
865 struct gpio_v2_line_event le;
866 struct line *line = container_of(work, struct line, work.work);
868 u64 eflags, edflags = READ_ONCE(line->edflags);
873 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
874 level = line->raw_level;
877 level = gpiod_get_raw_value_cansleep(line->desc);
879 pr_debug_ratelimited("debouncer failed to read line value\n");
883 if (READ_ONCE(line->level) == level)
886 WRITE_ONCE(line->level, level);
888 /* -- edge detection -- */
889 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
893 /* switch from physical level to logical - if they differ */
894 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
897 /* ignore edges that are not being monitored */
898 if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
899 ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
902 /* Do not leak kernel stack to userspace */
903 memset(&le, 0, sizeof(le));
906 le.timestamp_ns = line_event_timestamp(line);
907 le.offset = gpio_chip_hwgpio(line->desc);
909 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
910 /* discard events except the last one */
911 line->total_discard_seq -= 1;
912 diff_seqno = line->last_seqno - line->total_discard_seq -
914 line->line_seqno = line->last_seqno - line->total_discard_seq;
915 le.line_seqno = line->line_seqno;
916 le.seqno = (lr->num_lines == 1) ?
917 le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
919 #endif /* CONFIG_HTE */
922 le.line_seqno = line->line_seqno;
923 le.seqno = (lr->num_lines == 1) ?
924 le.line_seqno : atomic_inc_return(&lr->seqno);
927 le.id = line_event_id(level);
929 linereq_put_event(lr, &le);
932 static int debounce_setup(struct line *line, unsigned int debounce_period_us)
934 unsigned long irqflags;
938 ret = gpiod_set_debounce(line->desc, debounce_period_us);
940 WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
943 if (ret != -ENOTSUPP)
946 if (debounce_period_us) {
947 /* setup software debounce */
948 level = gpiod_get_raw_value_cansleep(line->desc);
952 if (!(IS_ENABLED(CONFIG_HTE) &&
953 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
954 irq = gpiod_to_irq(line->desc);
958 irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
959 ret = request_irq(irq, debounce_irq_handler, irqflags,
960 line->req->label, line);
965 ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
970 WRITE_ONCE(line->level, level);
971 WRITE_ONCE(line->sw_debounced, 1);
976 static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc,
977 unsigned int line_idx)
980 u64 mask = BIT_ULL(line_idx);
982 for (i = 0; i < lc->num_attrs; i++) {
983 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
984 (lc->attrs[i].mask & mask))
990 static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
991 unsigned int line_idx)
994 u64 mask = BIT_ULL(line_idx);
996 for (i = 0; i < lc->num_attrs; i++) {
997 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
998 (lc->attrs[i].mask & mask))
999 return lc->attrs[i].attr.debounce_period_us;
1000 }
1001 return 0;
1002 }
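/*
 * Example of how a config attribute selects lines, for illustration only: a
 * request covering three offsets where only the first and third are debounced
 * for 5ms could carry
 *
 *	lc->num_attrs = 1;
 *	lc->attrs[0].attr.id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
 *	lc->attrs[0].attr.debounce_period_us = 5000;
 *	lc->attrs[0].mask = 0x5;	// bits index the request's offsets[]
 *
 * so gpio_v2_line_config_debounced() is true for line_idx 0 and 2, and
 * gpio_v2_line_config_debounce_period() returns 5000 for those lines and 0
 * for line_idx 1.
 */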
1004 static void edge_detector_stop(struct line *line)
1007 free_irq(line->irq, line);
1012 if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
1013 hte_ts_put(&line->hdesc);
1016 cancel_delayed_work_sync(&line->work);
1017 WRITE_ONCE(line->sw_debounced, 0);
1018 WRITE_ONCE(line->edflags, 0);
1020 WRITE_ONCE(line->desc->debounce_period_us, 0);
1021 /* do not change line->level - see comment in debounced_value() */
1024 static int edge_detector_setup(struct line *line,
1025 struct gpio_v2_line_config *lc,
1026 unsigned int line_idx, u64 edflags)
1028 u32 debounce_period_us;
1029 unsigned long irqflags = 0;
1033 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
1034 if (eflags && !kfifo_initialized(&line->req->events)) {
1035 ret = kfifo_alloc(&line->req->events,
1036 line->req->event_buffer_size, GFP_KERNEL);
1040 if (gpio_v2_line_config_debounced(lc, line_idx)) {
1041 debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
1042 ret = debounce_setup(line, debounce_period_us);
1045 WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
1048 /* detection disabled or sw debouncer will provide edge detection */
1049 if (!eflags || READ_ONCE(line->sw_debounced))
1052 if (IS_ENABLED(CONFIG_HTE) &&
1053 (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1054 return hte_edge_setup(line, edflags);
1056 irq = gpiod_to_irq(line->desc);
1060 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
1061 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
1062 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
1063 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
1064 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
1065 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
1066 irqflags |= IRQF_ONESHOT;
1068 /* Request a thread to read the events */
1069 ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
1070 irqflags, line->req->label, line);
1078 static int edge_detector_update(struct line *line,
1079 struct gpio_v2_line_config *lc,
1080 unsigned int line_idx, u64 edflags)
1082 u64 active_edflags = READ_ONCE(line->edflags);
1083 unsigned int debounce_period_us =
1084 gpio_v2_line_config_debounce_period(lc, line_idx);
1086 if ((active_edflags == edflags) &&
1087 (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
1090 /* sw debounced and still will be...*/
1091 if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
1092 WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
1096 /* reconfiguring edge detection or sw debounce being disabled */
1097 if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
1098 (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) ||
1099 (!debounce_period_us && READ_ONCE(line->sw_debounced)))
1100 edge_detector_stop(line);
1102 return edge_detector_setup(line, lc, line_idx, edflags);
1105 static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
1106 unsigned int line_idx)
1109 u64 mask = BIT_ULL(line_idx);
1111 for (i = 0; i < lc->num_attrs; i++) {
1112 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) &&
1113 (lc->attrs[i].mask & mask))
1114 return lc->attrs[i].attr.flags;
1119 static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc,
1120 unsigned int line_idx)
1123 u64 mask = BIT_ULL(line_idx);
1125 for (i = 0; i < lc->num_attrs; i++) {
1126 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) &&
1127 (lc->attrs[i].mask & mask))
1128 return !!(lc->attrs[i].attr.values & mask);
1133 static int gpio_v2_line_flags_validate(u64 flags)
1135 /* Return an error if an unknown flag is set */
1136 if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
1139 if (!IS_ENABLED(CONFIG_HTE) &&
1140 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1143 /*
1144 * Do not allow both INPUT and OUTPUT flags to be set as they are
1145 * contradictory.
1146 */
1147 if ((flags & GPIO_V2_LINE_FLAG_INPUT) &&
1148 (flags & GPIO_V2_LINE_FLAG_OUTPUT))
1151 /* Only allow one event clock source */
1152 if (IS_ENABLED(CONFIG_HTE) &&
1153 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
1154 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1157 /* Edge detection requires explicit input. */
1158 if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
1159 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1160 return -EINVAL;
1162 /*
1163 * Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single
1164 * request. If the hardware actually supports enabling both at the
1165 * same time the electrical result would be disastrous.
1166 */
1167 if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) &&
1168 (flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE))
1171 /* Drive requires explicit output direction. */
1172 if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) &&
1173 !(flags & GPIO_V2_LINE_FLAG_OUTPUT))
1176 /* Bias requires explicit direction. */
1177 if ((flags & GPIO_V2_LINE_BIAS_FLAGS) &&
1178 !(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
1181 /* Only one bias flag can be set. */
1182 if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) &&
1183 (flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN |
1184 GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) ||
1185 ((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) &&
1186 (flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)))
1187 return -EINVAL;
1189 return 0;
1190 }
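/*
 * Illustrative outcomes of the checks above (GPIO_V2_LINE_FLAG_ prefixes
 * dropped; not an exhaustive list):
 *
 *	INPUT | EDGE_RISING | EDGE_FALLING	-> 0 (valid)
 *	EDGE_RISING				-> -EINVAL (edges need INPUT)
 *	OUTPUT | OPEN_DRAIN | BIAS_PULL_UP	-> 0 (valid)
 *	OUTPUT | EDGE_FALLING			-> -EINVAL (edges need INPUT)
 *	INPUT | OUTPUT				-> -EINVAL (contradictory)
 */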
1192 static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
1193 unsigned int num_lines)
1199 if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
1202 if (memchr_inv(lc->padding, 0, sizeof(lc->padding)))
1205 for (i = 0; i < num_lines; i++) {
1206 flags = gpio_v2_line_config_flags(lc, i);
1207 ret = gpio_v2_line_flags_validate(flags);
1211 /* debounce requires explicit input */
1212 if (gpio_v2_line_config_debounced(lc, i) &&
1213 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1219 static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
1220 unsigned long *flagsp)
1222 assign_bit(FLAG_ACTIVE_LOW, flagsp,
1223 flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);
1225 if (flags & GPIO_V2_LINE_FLAG_OUTPUT)
1226 set_bit(FLAG_IS_OUT, flagsp);
1227 else if (flags & GPIO_V2_LINE_FLAG_INPUT)
1228 clear_bit(FLAG_IS_OUT, flagsp);
1230 assign_bit(FLAG_EDGE_RISING, flagsp,
1231 flags & GPIO_V2_LINE_FLAG_EDGE_RISING);
1232 assign_bit(FLAG_EDGE_FALLING, flagsp,
1233 flags & GPIO_V2_LINE_FLAG_EDGE_FALLING);
1235 assign_bit(FLAG_OPEN_DRAIN, flagsp,
1236 flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
1237 assign_bit(FLAG_OPEN_SOURCE, flagsp,
1238 flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);
1240 assign_bit(FLAG_PULL_UP, flagsp,
1241 flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
1242 assign_bit(FLAG_PULL_DOWN, flagsp,
1243 flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
1244 assign_bit(FLAG_BIAS_DISABLE, flagsp,
1245 flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
1247 assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp,
1248 flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
1249 assign_bit(FLAG_EVENT_CLOCK_HTE, flagsp,
1250 flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
1253 static long linereq_get_values(struct linereq *lr, void __user *ip)
1255 struct gpio_v2_line_values lv;
1256 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1257 struct gpio_desc **descs;
1258 unsigned int i, didx, num_get;
1262 /* NOTE: It's ok to read values of output lines. */
1263 if (copy_from_user(&lv, ip, sizeof(lv)))
1266 for (num_get = 0, i = 0; i < lr->num_lines; i++) {
1267 if (lv.mask & BIT_ULL(i)) {
1269 descs = &lr->lines[i].desc;
1277 descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL);
1280 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1281 if (lv.mask & BIT_ULL(i)) {
1282 descs[didx] = lr->lines[i].desc;
1287 ret = gpiod_get_array_value_complex(false, true, num_get,
1296 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1297 if (lv.mask & BIT_ULL(i)) {
1298 if (lr->lines[i].sw_debounced)
1299 val = debounced_value(&lr->lines[i]);
1301 val = test_bit(didx, vals);
1303 lv.bits |= BIT_ULL(i);
1308 if (copy_to_user(ip, &lv, sizeof(lv)))
1314 static long linereq_set_values_unlocked(struct linereq *lr,
1315 struct gpio_v2_line_values *lv)
1317 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1318 struct gpio_desc **descs;
1319 unsigned int i, didx, num_set;
1322 bitmap_zero(vals, GPIO_V2_LINES_MAX);
1323 for (num_set = 0, i = 0; i < lr->num_lines; i++) {
1324 if (lv->mask & BIT_ULL(i)) {
1325 if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
1327 if (lv->bits & BIT_ULL(i))
1328 __set_bit(num_set, vals);
1330 descs = &lr->lines[i].desc;
1337 /* build compacted desc array and values */
1338 descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL);
1341 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1342 if (lv->mask & BIT_ULL(i)) {
1343 descs[didx] = lr->lines[i].desc;
1348 ret = gpiod_set_array_value_complex(false, true, num_set,
1356 static long linereq_set_values(struct linereq *lr, void __user *ip)
1358 struct gpio_v2_line_values lv;
1361 if (copy_from_user(&lv, ip, sizeof(lv)))
1364 mutex_lock(&lr->config_mutex);
1366 ret = linereq_set_values_unlocked(lr, &lv);
1368 mutex_unlock(&lr->config_mutex);
1370 return ret;
1371 }
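/*
 * Userspace drives these paths with the GPIO_V2_LINE_GET_VALUES_IOCTL and
 * GPIO_V2_LINE_SET_VALUES_IOCTL ioctl()s, e.g. (illustration only, for a
 * request covering two lines):
 *
 *	struct gpio_v2_line_values lv = {
 *		.mask = 0x3,	// operate on requested lines 0 and 1
 *		.bits = 0x2,	// drive line 1 active, line 0 inactive
 *	};
 *
 *	ioctl(req_fd, GPIO_V2_LINE_SET_VALUES_IOCTL, &lv);
 *	ioctl(req_fd, GPIO_V2_LINE_GET_VALUES_IOCTL, &lv);	// lv.bits refreshed
 */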
1373 static long linereq_set_config_unlocked(struct linereq *lr,
1374 struct gpio_v2_line_config *lc)
1376 struct gpio_desc *desc;
1382 for (i = 0; i < lr->num_lines; i++) {
1383 line = &lr->lines[i];
1384 desc = lr->lines[i].desc;
1385 flags = gpio_v2_line_config_flags(lc, i);
1386 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1387 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
1388 /*
1389 * Lines have to be requested explicitly for input
1390 * or output, else the line will be treated "as is".
1391 */
1392 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1393 int val = gpio_v2_line_config_output_value(lc, i);
1395 edge_detector_stop(line);
1396 ret = gpiod_direction_output(desc, val);
1399 } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
1400 ret = gpiod_direction_input(desc);
1404 ret = edge_detector_update(line, lc, i, edflags);
1409 WRITE_ONCE(line->edflags, edflags);
1411 blocking_notifier_call_chain(&desc->gdev->notifier,
1412 GPIO_V2_LINE_CHANGED_CONFIG,
1418 static long linereq_set_config(struct linereq *lr, void __user *ip)
1420 struct gpio_v2_line_config lc;
1423 if (copy_from_user(&lc, ip, sizeof(lc)))
1426 ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
1430 mutex_lock(&lr->config_mutex);
1432 ret = linereq_set_config_unlocked(lr, &lc);
1434 mutex_unlock(&lr->config_mutex);
1439 static long linereq_ioctl_unlocked(struct file *file, unsigned int cmd,
1442 struct linereq *lr = file->private_data;
1443 void __user *ip = (void __user *)arg;
1445 if (!lr->gdev->chip)
1446 return -ENODEV;
1448 switch (cmd) {
1449 case GPIO_V2_LINE_GET_VALUES_IOCTL:
1450 return linereq_get_values(lr, ip);
1451 case GPIO_V2_LINE_SET_VALUES_IOCTL:
1452 return linereq_set_values(lr, ip);
1453 case GPIO_V2_LINE_SET_CONFIG_IOCTL:
1454 return linereq_set_config(lr, ip);
1460 static long linereq_ioctl(struct file *file, unsigned int cmd,
1463 struct linereq *lr = file->private_data;
1465 return call_ioctl_locked(file, cmd, arg, lr->gdev,
1466 linereq_ioctl_unlocked);
1469 #ifdef CONFIG_COMPAT
1470 static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
1473 return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1477 static __poll_t linereq_poll_unlocked(struct file *file,
1478 struct poll_table_struct *wait)
1480 struct linereq *lr = file->private_data;
1481 __poll_t events = 0;
1483 if (!lr->gdev->chip)
1484 return EPOLLHUP | EPOLLERR;
1486 poll_wait(file, &lr->wait, wait);
1488 if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
1490 events = EPOLLIN | EPOLLRDNORM;
1495 static __poll_t linereq_poll(struct file *file,
1496 struct poll_table_struct *wait)
1498 struct linereq *lr = file->private_data;
1500 return call_poll_locked(file, wait, lr->gdev, linereq_poll_unlocked);
1503 static ssize_t linereq_read_unlocked(struct file *file, char __user *buf,
1504 size_t count, loff_t *f_ps)
1506 struct linereq *lr = file->private_data;
1507 struct gpio_v2_line_event le;
1508 ssize_t bytes_read = 0;
1511 if (!lr->gdev->chip)
1514 if (count < sizeof(le))
1518 spin_lock(&lr->wait.lock);
1519 if (kfifo_is_empty(&lr->events)) {
1521 spin_unlock(&lr->wait.lock);
1525 if (file->f_flags & O_NONBLOCK) {
1526 spin_unlock(&lr->wait.lock);
1530 ret = wait_event_interruptible_locked(lr->wait,
1531 !kfifo_is_empty(&lr->events));
1533 spin_unlock(&lr->wait.lock);
1538 ret = kfifo_out(&lr->events, &le, 1);
1539 spin_unlock(&lr->wait.lock);
1541 /*
1542 * This should never happen - we were holding the
1543 * lock from the moment we learned the fifo is no
1544 * longer empty until now.
1545 */
1550 if (copy_to_user(buf + bytes_read, &le, sizeof(le)))
1552 bytes_read += sizeof(le);
1553 } while (count >= bytes_read + sizeof(le));
1558 static ssize_t linereq_read(struct file *file, char __user *buf,
1559 size_t count, loff_t *f_ps)
1561 struct linereq *lr = file->private_data;
1563 return call_read_locked(file, buf, count, f_ps, lr->gdev,
1564 linereq_read_unlocked);
1565 }
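/*
 * A sketch of the matching userspace read loop, for illustration only
 * (req_fd is a line request fd obtained with edge detection flags set):
 *
 *	struct gpio_v2_line_event ev;
 *
 *	while (read(req_fd, &ev, sizeof(ev)) == sizeof(ev))
 *		printf("offset %u: %s edge, seqno %u\n", ev.offset,
 *		       ev.id == GPIO_V2_LINE_EVENT_RISING_EDGE ?
 *				"rising" : "falling",
 *		       ev.seqno);
 */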
1567 static void linereq_free(struct linereq *lr)
1571 for (i = 0; i < lr->num_lines; i++) {
1572 if (lr->lines[i].desc) {
1573 edge_detector_stop(&lr->lines[i]);
1574 gpiod_free(lr->lines[i].desc);
1577 kfifo_free(&lr->events);
1579 put_device(&lr->gdev->dev);
1583 static int linereq_release(struct inode *inode, struct file *file)
1585 struct linereq *lr = file->private_data;
1591 #ifdef CONFIG_PROC_FS
1592 static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
1594 struct linereq *lr = file->private_data;
1595 struct device *dev = &lr->gdev->dev;
1598 seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev));
1600 for (i = 0; i < lr->num_lines; i++)
1601 seq_printf(out, "gpio-line:\t%d\n",
1602 gpio_chip_hwgpio(lr->lines[i].desc));
1606 static const struct file_operations line_fileops = {
1607 .release = linereq_release,
1608 .read = linereq_read,
1609 .poll = linereq_poll,
1610 .owner = THIS_MODULE,
1611 .llseek = noop_llseek,
1612 .unlocked_ioctl = linereq_ioctl,
1613 #ifdef CONFIG_COMPAT
1614 .compat_ioctl = linereq_ioctl_compat,
1616 #ifdef CONFIG_PROC_FS
1617 .show_fdinfo = linereq_show_fdinfo,
1621 static int linereq_create(struct gpio_device *gdev, void __user *ip)
1623 struct gpio_v2_line_request ulr;
1624 struct gpio_v2_line_config *lc;
1631 if (copy_from_user(&ulr, ip, sizeof(ulr)))
1634 if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
1637 if (memchr_inv(ulr.padding, 0, sizeof(ulr.padding)))
1641 ret = gpio_v2_line_config_validate(lc, ulr.num_lines);
1645 lr = kzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
1650 get_device(&gdev->dev);
1652 for (i = 0; i < ulr.num_lines; i++) {
1653 lr->lines[i].req = lr;
1654 WRITE_ONCE(lr->lines[i].sw_debounced, 0);
1655 INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func);
1658 if (ulr.consumer[0] != '\0') {
1659 /* label is only initialized if consumer is set */
1660 lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1,
1664 goto out_free_linereq;
1668 mutex_init(&lr->config_mutex);
1669 init_waitqueue_head(&lr->wait);
1670 lr->event_buffer_size = ulr.event_buffer_size;
1671 if (lr->event_buffer_size == 0)
1672 lr->event_buffer_size = ulr.num_lines * 16;
1673 else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16)
1674 lr->event_buffer_size = GPIO_V2_LINES_MAX * 16;
1676 atomic_set(&lr->seqno, 0);
1677 lr->num_lines = ulr.num_lines;
1679 /* Request each GPIO */
1680 for (i = 0; i < ulr.num_lines; i++) {
1681 u32 offset = ulr.offsets[i];
1682 struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);
1685 ret = PTR_ERR(desc);
1686 goto out_free_linereq;
1689 ret = gpiod_request_user(desc, lr->label);
1691 goto out_free_linereq;
1693 lr->lines[i].desc = desc;
1694 flags = gpio_v2_line_config_flags(lc, i);
1695 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1697 ret = gpiod_set_transitory(desc, false);
1699 goto out_free_linereq;
1701 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
1702 /*
1703 * Lines have to be requested explicitly for input
1704 * or output, else the line will be treated "as is".
1705 */
1706 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1707 int val = gpio_v2_line_config_output_value(lc, i);
1709 ret = gpiod_direction_output(desc, val);
1711 goto out_free_linereq;
1712 } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
1713 ret = gpiod_direction_input(desc);
1715 goto out_free_linereq;
1717 ret = edge_detector_setup(&lr->lines[i], lc, i,
1720 goto out_free_linereq;
1723 lr->lines[i].edflags = edflags;
1725 blocking_notifier_call_chain(&desc->gdev->notifier,
1726 GPIO_V2_LINE_CHANGED_REQUESTED, desc);
1728 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
1729 offset);
1730 }
1732 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
1735 goto out_free_linereq;
1738 file = anon_inode_getfile("gpio-line", &line_fileops, lr,
1739 O_RDONLY | O_CLOEXEC);
1741 ret = PTR_ERR(file);
1742 goto out_put_unused_fd;
1746 if (copy_to_user(ip, &ulr, sizeof(ulr))) {
1747 /*
1748 * fput() will trigger the release() callback, so do not go onto
1749 * the regular error cleanup path here.
1750 */
1756 fd_install(fd, file);
1758 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
1759 lr->num_lines);
1770 #ifdef CONFIG_GPIO_CDEV_V1
1772 /*
1773 * GPIO line event management
1774 */
1776 /**
1777 * struct lineevent_state - contains the state of a userspace event
1778 * @gdev: the GPIO device the event pertains to
1779 * @label: consumer label used to tag descriptors
1780 * @desc: the GPIO descriptor held by this event
1781 * @eflags: the event flags this line was requested with
1782 * @irq: the interrupt that trigger in response to events on this GPIO
1783 * @wait: wait queue that handles blocking reads of events
1784 * @events: KFIFO for the GPIO events
1785 * @timestamp: cache for the timestamp storing it between hardirq
1786 * and IRQ thread, used to bring the timestamp close to the actual
1787 * event
1788 */
1789 struct lineevent_state {
1790 struct gpio_device *gdev;
1791 const char *label;
1792 struct gpio_desc *desc;
1793 u32 eflags;
1794 int irq;
1795 wait_queue_head_t wait;
1796 DECLARE_KFIFO(events, struct gpioevent_data, 16);
1797 u64 timestamp;
1798 };
1800 #define GPIOEVENT_REQUEST_VALID_FLAGS \
1801 (GPIOEVENT_REQUEST_RISING_EDGE | \
1802 GPIOEVENT_REQUEST_FALLING_EDGE)
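/*
 * A minimal userspace sketch of the v1 event request, for illustration only
 * (error handling omitted; the offset is an arbitrary example):
 *
 *	struct gpioevent_request evreq = { 0 };
 *
 *	evreq.lineoffset = 3;
 *	evreq.handleflags = GPIOHANDLE_REQUEST_INPUT;
 *	evreq.eventflags = GPIOEVENT_REQUEST_BOTH_EDGES;
 *	strcpy(evreq.consumer_label, "example-consumer");
 *	ioctl(chip_fd, GPIO_GET_LINEEVENT_IOCTL, &evreq);
 *	// evreq.fd can then be read() for struct gpioevent_data records.
 */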
1804 static __poll_t lineevent_poll_unlocked(struct file *file,
1805 struct poll_table_struct *wait)
1807 struct lineevent_state *le = file->private_data;
1808 __poll_t events = 0;
1810 if (!le->gdev->chip)
1811 return EPOLLHUP | EPOLLERR;
1813 poll_wait(file, &le->wait, wait);
1815 if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
1816 events = EPOLLIN | EPOLLRDNORM;
1821 static __poll_t lineevent_poll(struct file *file,
1822 struct poll_table_struct *wait)
1824 struct lineevent_state *le = file->private_data;
1826 return call_poll_locked(file, wait, le->gdev, lineevent_poll_unlocked);
1829 struct compat_gpioeevent_data {
1830 compat_u64 timestamp;
1831 u32 id;
1832 };
1834 static ssize_t lineevent_read_unlocked(struct file *file, char __user *buf,
1835 size_t count, loff_t *f_ps)
1837 struct lineevent_state *le = file->private_data;
1838 struct gpioevent_data ge;
1839 ssize_t bytes_read = 0;
1843 if (!le->gdev->chip)
1844 return -ENODEV;
1846 /*
1847 * When compatible system call is being used the struct gpioevent_data,
1848 * in case of at least ia32, has different size due to the alignment
1849 * differences. Because we have first member 64 bits followed by one of
1850 * 32 bits there is no gap between them. The only difference is the
1851 * padding at the end of the data structure. Hence, we calculate the
1852 * actual sizeof() and pass this as an argument to copy_to_user() to
1853 * drop unneeded bytes from the output.
1854 */
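/*
 * For illustration (exact figures depend on the ABI): struct gpioevent_data
 * is { __u64 timestamp; __u32 id; }, which a 64-bit compiler pads to 16
 * bytes, while ia32 aligns 64-bit integers to 4 bytes and yields 12 bytes -
 * the same size as the compat struct above.
 */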
1855 if (compat_need_64bit_alignment_fixup())
1856 ge_size = sizeof(struct compat_gpioeevent_data);
1858 ge_size = sizeof(struct gpioevent_data);
1859 if (count < ge_size)
1863 spin_lock(&le->wait.lock);
1864 if (kfifo_is_empty(&le->events)) {
1866 spin_unlock(&le->wait.lock);
1870 if (file->f_flags & O_NONBLOCK) {
1871 spin_unlock(&le->wait.lock);
1875 ret = wait_event_interruptible_locked(le->wait,
1876 !kfifo_is_empty(&le->events));
1878 spin_unlock(&le->wait.lock);
1883 ret = kfifo_out(&le->events, &ge, 1);
1884 spin_unlock(&le->wait.lock);
1886 /*
1887 * This should never happen - we were holding the lock
1888 * from the moment we learned the fifo is no longer
1889 * empty until now.
1890 */
1895 if (copy_to_user(buf + bytes_read, &ge, ge_size))
1897 bytes_read += ge_size;
1898 } while (count >= bytes_read + ge_size);
1903 static ssize_t lineevent_read(struct file *file, char __user *buf,
1904 size_t count, loff_t *f_ps)
1906 struct lineevent_state *le = file->private_data;
1908 return call_read_locked(file, buf, count, f_ps, le->gdev,
1909 lineevent_read_unlocked);
1912 static void lineevent_free(struct lineevent_state *le)
1915 free_irq(le->irq, le);
1917 gpiod_free(le->desc);
1919 put_device(&le->gdev->dev);
1923 static int lineevent_release(struct inode *inode, struct file *file)
1925 lineevent_free(file->private_data);
1929 static long lineevent_ioctl_unlocked(struct file *file, unsigned int cmd,
1932 struct lineevent_state *le = file->private_data;
1933 void __user *ip = (void __user *)arg;
1934 struct gpiohandle_data ghd;
1936 if (!le->gdev->chip)
1937 return -ENODEV;
1939 /*
1940 * We can get the value for an event line but not set it,
1941 * because it is input by definition.
1942 */
1943 if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
1946 memset(&ghd, 0, sizeof(ghd));
1948 val = gpiod_get_value_cansleep(le->desc);
1951 ghd.values[0] = val;
1953 if (copy_to_user(ip, &ghd, sizeof(ghd)))
1961 static long lineevent_ioctl(struct file *file, unsigned int cmd,
1964 struct lineevent_state *le = file->private_data;
1966 return call_ioctl_locked(file, cmd, arg, le->gdev,
1967 lineevent_ioctl_unlocked);
1970 #ifdef CONFIG_COMPAT
1971 static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
1974 return lineevent_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1978 static const struct file_operations lineevent_fileops = {
1979 .release = lineevent_release,
1980 .read = lineevent_read,
1981 .poll = lineevent_poll,
1982 .owner = THIS_MODULE,
1983 .llseek = noop_llseek,
1984 .unlocked_ioctl = lineevent_ioctl,
1985 #ifdef CONFIG_COMPAT
1986 .compat_ioctl = lineevent_ioctl_compat,
1990 static irqreturn_t lineevent_irq_thread(int irq, void *p)
1992 struct lineevent_state *le = p;
1993 struct gpioevent_data ge;
1996 /* Do not leak kernel stack to userspace */
1997 memset(&ge, 0, sizeof(ge));
1999 /*
2000 * We may be running from a nested threaded interrupt in which case
2001 * we didn't get the timestamp from lineevent_irq_handler().
2002 */
2003 if (!le->timestamp)
2004 ge.timestamp = ktime_get_ns();
2005 else
2006 ge.timestamp = le->timestamp;
2008 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
2009 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
2010 int level = gpiod_get_value_cansleep(le->desc);
2013 /* Emit low-to-high event */
2014 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
2016 /* Emit high-to-low event */
2017 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
2018 } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
2019 /* Emit low-to-high event */
2020 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
2021 } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
2022 /* Emit high-to-low event */
2023 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
2028 ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
2031 wake_up_poll(&le->wait, EPOLLIN);
2033 pr_debug_ratelimited("event FIFO is full - event dropped\n");
2038 static irqreturn_t lineevent_irq_handler(int irq, void *p)
2040 struct lineevent_state *le = p;
2042 /*
2043 * Just store the timestamp in hardirq context so we get it as
2044 * close in time as possible to the actual event.
2045 */
2046 le->timestamp = ktime_get_ns();
2048 return IRQ_WAKE_THREAD;
2051 static int lineevent_create(struct gpio_device *gdev, void __user *ip)
2053 struct gpioevent_request eventreq;
2054 struct lineevent_state *le;
2055 struct gpio_desc *desc;
2062 int irq, irqflags = 0;
2064 if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
2067 offset = eventreq.lineoffset;
2068 lflags = eventreq.handleflags;
2069 eflags = eventreq.eventflags;
2071 desc = gpiochip_get_desc(gdev->chip, offset);
2073 return PTR_ERR(desc);
2075 /* Return an error if an unknown flag is set */
2076 if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
2077 (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
2080 /* This is just wrong: we don't look for events on output lines */
2081 if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
2082 (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
2083 (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
2086 /* Only one bias flag can be set. */
2087 if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
2088 (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
2089 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
2090 ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
2091 (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
2094 le = kzalloc(sizeof(*le), GFP_KERNEL);
2098 get_device(&gdev->dev);
2100 if (eventreq.consumer_label[0] != '\0') {
2101 /* label is only initialized if consumer_label is set */
2102 le->label = kstrndup(eventreq.consumer_label,
2103 sizeof(eventreq.consumer_label) - 1,
2111 ret = gpiod_request_user(desc, le->label);
2115 le->eflags = eflags;
2117 linehandle_flags_to_desc_flags(lflags, &desc->flags);
2119 ret = gpiod_direction_input(desc);
2123 blocking_notifier_call_chain(&desc->gdev->notifier,
2124 GPIO_V2_LINE_CHANGED_REQUESTED, desc);
2126 irq = gpiod_to_irq(desc);
2132 if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
2133 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
2134 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
2135 if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
2136 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
2137 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
2138 irqflags |= IRQF_ONESHOT;
2140 INIT_KFIFO(le->events);
2141 init_waitqueue_head(&le->wait);
2143 /* Request a thread to read the events */
2144 ret = request_threaded_irq(irq,
2145 lineevent_irq_handler,
2146 lineevent_irq_thread,
2155 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
2161 file = anon_inode_getfile("gpio-event",
2164 O_RDONLY | O_CLOEXEC);
2166 ret = PTR_ERR(file);
2167 goto out_put_unused_fd;
2171 if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
2172 /*
2173 * fput() will trigger the release() callback, so do not go onto
2174 * the regular error cleanup path here.
2175 */
2181 fd_install(fd, file);
2192 static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2,
2193 struct gpioline_info *info_v1)
2195 u64 flagsv2 = info_v2->flags;
2197 memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name));
2198 memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer));
2199 info_v1->line_offset = info_v2->offset;
2202 if (flagsv2 & GPIO_V2_LINE_FLAG_USED)
2203 info_v1->flags |= GPIOLINE_FLAG_KERNEL;
2205 if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT)
2206 info_v1->flags |= GPIOLINE_FLAG_IS_OUT;
2208 if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
2209 info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW;
2211 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN)
2212 info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN;
2213 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE)
2214 info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE;
2216 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)
2217 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
2218 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN)
2219 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
2220 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED)
2221 info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
2224 static void gpio_v2_line_info_changed_to_v1(
2225 struct gpio_v2_line_info_changed *lic_v2,
2226 struct gpioline_info_changed *lic_v1)
2228 memset(lic_v1, 0, sizeof(*lic_v1));
2229 gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
2230 lic_v1->timestamp = lic_v2->timestamp_ns;
2231 lic_v1->event_type = lic_v2->event_type;
2234 #endif /* CONFIG_GPIO_CDEV_V1 */
2236 static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
2237 struct gpio_v2_line_info *info)
2239 struct gpio_chip *gc = desc->gdev->chip;
2240 bool ok_for_pinctrl;
2241 unsigned long flags;
2242 u32 debounce_period_us;
2243 unsigned int num_attrs = 0;
2245 memset(info, 0, sizeof(*info));
2246 info->offset = gpio_chip_hwgpio(desc);
2248 /*
2249 * This function takes a mutex so we must check this before taking
2250 * the spinlock.
2251 *
2252 * FIXME: find a non-racy way to retrieve this information. Maybe a
2253 * lock common to both frameworks?
2254 */
2255 ok_for_pinctrl =
2256 pinctrl_gpio_can_use_line(gc->base + info->offset);
2258 spin_lock_irqsave(&gpio_lock, flags);
2261 strscpy(info->name, desc->name, sizeof(info->name));
2264 strscpy(info->consumer, desc->label, sizeof(info->consumer));
2266 /*
2267 * Userspace only needs to know that the kernel is using this GPIO so
2268 * it can't use it.
2269 */
2271 if (test_bit(FLAG_REQUESTED, &desc->flags) ||
2272 test_bit(FLAG_IS_HOGGED, &desc->flags) ||
2273 test_bit(FLAG_USED_AS_IRQ, &desc->flags) ||
2274 test_bit(FLAG_EXPORT, &desc->flags) ||
2275 test_bit(FLAG_SYSFS, &desc->flags) ||
2276 !gpiochip_line_is_valid(gc, info->offset) ||
2278 info->flags |= GPIO_V2_LINE_FLAG_USED;
2280 if (test_bit(FLAG_IS_OUT, &desc->flags))
2281 info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
2283 info->flags |= GPIO_V2_LINE_FLAG_INPUT;
2285 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
2286 info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;
2288 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
2289 info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
2290 if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
2291 info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;
2293 if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
2294 info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
2295 if (test_bit(FLAG_PULL_DOWN, &desc->flags))
2296 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
2297 if (test_bit(FLAG_PULL_UP, &desc->flags))
2298 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
2300 if (test_bit(FLAG_EDGE_RISING, &desc->flags))
2301 info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
2302 if (test_bit(FLAG_EDGE_FALLING, &desc->flags))
2303 info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
2305 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &desc->flags))
2306 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
2307 else if (test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags))
2308 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
2310 debounce_period_us = READ_ONCE(desc->debounce_period_us);
2311 if (debounce_period_us) {
2312 info->attrs[num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
2313 info->attrs[num_attrs].debounce_period_us = debounce_period_us;
2316 info->num_attrs = num_attrs;
2318 spin_unlock_irqrestore(&gpio_lock, flags);
2321 struct gpio_chardev_data {
2322 struct gpio_device *gdev;
2323 wait_queue_head_t wait;
2324 DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
2325 struct notifier_block lineinfo_changed_nb;
2326 unsigned long *watched_lines;
2327 #ifdef CONFIG_GPIO_CDEV_V1
2328 atomic_t watch_abi_version;
2329 #endif
2330 };
2332 static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
2334 struct gpio_device *gdev = cdev->gdev;
2335 struct gpiochip_info chipinfo;
2337 memset(&chipinfo, 0, sizeof(chipinfo));
2339 strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
2340 strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
2341 chipinfo.lines = gdev->ngpio;
2342 if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
2343 return -EFAULT;
2345 return 0;
2346 }
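/*
 * Userspace reads this via GPIO_GET_CHIPINFO_IOCTL, e.g. (illustration only):
 *
 *	struct gpiochip_info info;
 *
 *	ioctl(chip_fd, GPIO_GET_CHIPINFO_IOCTL, &info);
 *	printf("%s [%s] has %u lines\n", info.name, info.label, info.lines);
 */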
2347 #ifdef CONFIG_GPIO_CDEV_V1
2348 /*
2349 * returns 0 if the versions match, else the previously selected ABI version
2350 */
2351 static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
2352 unsigned int version)
2354 int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version);
2356 if (abiv == version)
2362 static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
2365 struct gpio_desc *desc;
2366 struct gpioline_info lineinfo;
2367 struct gpio_v2_line_info lineinfo_v2;
2369 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2372 /* this doubles as a range check on line_offset */
2373 desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset);
2375 return PTR_ERR(desc);
2378 if (lineinfo_ensure_abi_version(cdev, 1))
2381 if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
2385 gpio_desc_to_lineinfo(desc, &lineinfo_v2);
2386 gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
2388 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2390 clear_bit(lineinfo.line_offset, cdev->watched_lines);
2398 static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
2401 struct gpio_desc *desc;
2402 struct gpio_v2_line_info lineinfo;
2404 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2407 if (memchr_inv(lineinfo.padding, 0, sizeof(lineinfo.padding)))
2410 desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.offset);
2412 return PTR_ERR(desc);
2415 #ifdef CONFIG_GPIO_CDEV_V1
2416 if (lineinfo_ensure_abi_version(cdev, 2))
2419 if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
2422 gpio_desc_to_lineinfo(desc, &lineinfo);
2424 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2426 clear_bit(lineinfo.offset, cdev->watched_lines);
2433 static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
2437 if (copy_from_user(&offset, ip, sizeof(offset)))
2440 if (offset >= cdev->gdev->ngpio)
2443 if (!test_and_clear_bit(offset, cdev->watched_lines))
2449 /*
2450 * gpio_ioctl() - ioctl handler for the GPIO chardev
2451 */
2452 static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2454 struct gpio_chardev_data *cdev = file->private_data;
2455 struct gpio_device *gdev = cdev->gdev;
2456 void __user *ip = (void __user *)arg;
2458 /* We fail any subsequent ioctl():s when the chip is gone */
2459 if (!gdev->chip)
2460 return -ENODEV;
2462 /* Fill in the struct and pass to userspace */
2463 switch (cmd) {
2464 case GPIO_GET_CHIPINFO_IOCTL:
2465 return chipinfo_get(cdev, ip);
2466 #ifdef CONFIG_GPIO_CDEV_V1
2467 case GPIO_GET_LINEHANDLE_IOCTL:
2468 return linehandle_create(gdev, ip);
2469 case GPIO_GET_LINEEVENT_IOCTL:
2470 return lineevent_create(gdev, ip);
2471 case GPIO_GET_LINEINFO_IOCTL:
2472 return lineinfo_get_v1(cdev, ip, false);
2473 case GPIO_GET_LINEINFO_WATCH_IOCTL:
2474 return lineinfo_get_v1(cdev, ip, true);
2475 #endif /* CONFIG_GPIO_CDEV_V1 */
2476 case GPIO_V2_GET_LINEINFO_IOCTL:
2477 return lineinfo_get(cdev, ip, false);
2478 case GPIO_V2_GET_LINEINFO_WATCH_IOCTL:
2479 return lineinfo_get(cdev, ip, true);
2480 case GPIO_V2_GET_LINE_IOCTL:
2481 return linereq_create(gdev, ip);
2482 case GPIO_GET_LINEINFO_UNWATCH_IOCTL:
2483 return lineinfo_unwatch(cdev, ip);
2489 #ifdef CONFIG_COMPAT
2490 static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
2493 return gpio_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
2497 static struct gpio_chardev_data *
2498 to_gpio_chardev_data(struct notifier_block *nb)
2500 return container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
2503 static int lineinfo_changed_notify(struct notifier_block *nb,
2504 unsigned long action, void *data)
2506 struct gpio_chardev_data *cdev = to_gpio_chardev_data(nb);
2507 struct gpio_v2_line_info_changed chg;
2508 struct gpio_desc *desc = data;
2511 if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
2514 memset(&chg, 0, sizeof(chg));
2515 chg.event_type = action;
2516 chg.timestamp_ns = ktime_get_ns();
2517 gpio_desc_to_lineinfo(desc, &chg.info);
2519 ret = kfifo_in_spinlocked(&cdev->events, &chg, 1, &cdev->wait.lock);
2521 wake_up_poll(&cdev->wait, EPOLLIN);
2523 pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");
2528 static __poll_t lineinfo_watch_poll_unlocked(struct file *file,
2529 struct poll_table_struct *pollt)
2531 struct gpio_chardev_data *cdev = file->private_data;
2532 __poll_t events = 0;
2534 if (!cdev->gdev->chip)
2535 return EPOLLHUP | EPOLLERR;
2537 poll_wait(file, &cdev->wait, pollt);
2539 if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
2541 events = EPOLLIN | EPOLLRDNORM;
2546 static __poll_t lineinfo_watch_poll(struct file *file,
2547 struct poll_table_struct *pollt)
2549 struct gpio_chardev_data *cdev = file->private_data;
2551 return call_poll_locked(file, pollt, cdev->gdev,
2552 lineinfo_watch_poll_unlocked);
2555 static ssize_t lineinfo_watch_read_unlocked(struct file *file, char __user *buf,
2556 size_t count, loff_t *off)
2558 struct gpio_chardev_data *cdev = file->private_data;
2559 struct gpio_v2_line_info_changed event;
2560 ssize_t bytes_read = 0;
2564 if (!cdev->gdev->chip)
2567 #ifndef CONFIG_GPIO_CDEV_V1
2568 event_size = sizeof(struct gpio_v2_line_info_changed);
2569 if (count < event_size)
2574 spin_lock(&cdev->wait.lock);
2575 if (kfifo_is_empty(&cdev->events)) {
2577 spin_unlock(&cdev->wait.lock);
2581 if (file->f_flags & O_NONBLOCK) {
2582 spin_unlock(&cdev->wait.lock);
2586 ret = wait_event_interruptible_locked(cdev->wait,
2587 !kfifo_is_empty(&cdev->events));
2589 spin_unlock(&cdev->wait.lock);
2593 #ifdef CONFIG_GPIO_CDEV_V1
2594 /* must be after kfifo check so watch_abi_version is set */
2595 if (atomic_read(&cdev->watch_abi_version) == 2)
2596 event_size = sizeof(struct gpio_v2_line_info_changed);
2598 event_size = sizeof(struct gpioline_info_changed);
2599 if (count < event_size) {
2600 spin_unlock(&cdev->wait.lock);
2604 ret = kfifo_out(&cdev->events, &event, 1);
2605 spin_unlock(&cdev->wait.lock);
2609 /* We should never get here. See lineevent_read(). */
2612 #ifdef CONFIG_GPIO_CDEV_V1
2613 if (event_size == sizeof(struct gpio_v2_line_info_changed)) {
2614 if (copy_to_user(buf + bytes_read, &event, event_size))
2617 struct gpioline_info_changed event_v1;
2619 gpio_v2_line_info_changed_to_v1(&event, &event_v1);
2620 if (copy_to_user(buf + bytes_read, &event_v1,
2625 if (copy_to_user(buf + bytes_read, &event, event_size))
2628 bytes_read += event_size;
2629 } while (count >= bytes_read + sizeof(event));
2634 static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
2635 size_t count, loff_t *off)
2637 struct gpio_chardev_data *cdev = file->private_data;
2639 return call_read_locked(file, buf, count, off, cdev->gdev,
2640 lineinfo_watch_read_unlocked);
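/*
 * A sketch of the v2 watch flow this read() serves, for illustration only:
 *
 *	struct gpio_v2_line_info linfo = { .offset = 3 };
 *	struct gpio_v2_line_info_changed chg;
 *
 *	ioctl(chip_fd, GPIO_V2_GET_LINEINFO_WATCH_IOCTL, &linfo);
 *	while (read(chip_fd, &chg, sizeof(chg)) == sizeof(chg)) {
 *		// chg.event_type is GPIO_V2_LINE_CHANGED_REQUESTED,
 *		// GPIO_V2_LINE_CHANGED_RELEASED or GPIO_V2_LINE_CHANGED_CONFIG,
 *		// and chg.info holds the updated line info.
 *	}
 */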
2643 /**
2644 * gpio_chrdev_open() - open the chardev for ioctl operations
2645 * @inode: inode for this chardev
2646 * @file: file struct for storing private data
2647 * Returns 0 on success
2648 */
2649 static int gpio_chrdev_open(struct inode *inode, struct file *file)
2651 struct gpio_device *gdev = container_of(inode->i_cdev,
2652 struct gpio_device, chrdev);
2653 struct gpio_chardev_data *cdev;
2656 down_read(&gdev->sem);
2658 /* Fail on open if the backing gpiochip is gone */
2664 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
2668 cdev->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL);
2669 if (!cdev->watched_lines)
2672 init_waitqueue_head(&cdev->wait);
2673 INIT_KFIFO(cdev->events);
2676 cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
2677 ret = blocking_notifier_chain_register(&gdev->notifier,
2678 &cdev->lineinfo_changed_nb);
2680 goto out_free_bitmap;
2682 get_device(&gdev->dev);
2683 file->private_data = cdev;
2685 ret = nonseekable_open(inode, file);
2687 goto out_unregister_notifier;
2689 up_read(&gdev->sem);
2693 out_unregister_notifier:
2694 blocking_notifier_chain_unregister(&gdev->notifier,
2695 &cdev->lineinfo_changed_nb);
2697 bitmap_free(cdev->watched_lines);
2701 up_read(&gdev->sem);
2705 /**
2706 * gpio_chrdev_release() - close chardev after ioctl operations
2707 * @inode: inode for this chardev
2708 * @file: file struct for storing private data
2709 * Returns 0 on success
2710 */
2711 static int gpio_chrdev_release(struct inode *inode, struct file *file)
2713 struct gpio_chardev_data *cdev = file->private_data;
2714 struct gpio_device *gdev = cdev->gdev;
2716 bitmap_free(cdev->watched_lines);
2717 blocking_notifier_chain_unregister(&gdev->notifier,
2718 &cdev->lineinfo_changed_nb);
2719 put_device(&gdev->dev);
2725 static const struct file_operations gpio_fileops = {
2726 .release = gpio_chrdev_release,
2727 .open = gpio_chrdev_open,
2728 .poll = lineinfo_watch_poll,
2729 .read = lineinfo_watch_read,
2730 .owner = THIS_MODULE,
2731 .llseek = no_llseek,
2732 .unlocked_ioctl = gpio_ioctl,
2733 #ifdef CONFIG_COMPAT
2734 .compat_ioctl = gpio_ioctl_compat,
2738 int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
2742 cdev_init(&gdev->chrdev, &gpio_fileops);
2743 gdev->chrdev.owner = THIS_MODULE;
2744 gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);
2746 ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
2750 chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
2751 MAJOR(devt), gdev->id);
2756 void gpiolib_cdev_unregister(struct gpio_device *gdev)
2758 cdev_device_del(&gdev->chrdev, &gdev->dev);