1 // SPDX-License-Identifier: GPL-2.0
3 #include <linux/anon_inodes.h>
4 #include <linux/atomic.h>
5 #include <linux/bitmap.h>
6 #include <linux/build_bug.h>
7 #include <linux/cdev.h>
8 #include <linux/cleanup.h>
9 #include <linux/compat.h>
10 #include <linux/compiler.h>
11 #include <linux/device.h>
12 #include <linux/err.h>
13 #include <linux/file.h>
14 #include <linux/gpio.h>
15 #include <linux/gpio/driver.h>
16 #include <linux/hte.h>
17 #include <linux/interrupt.h>
18 #include <linux/irqreturn.h>
19 #include <linux/kfifo.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/overflow.h>
23 #include <linux/pinctrl/consumer.h>
24 #include <linux/poll.h>
25 #include <linux/seq_file.h>
26 #include <linux/spinlock.h>
27 #include <linux/string.h>
28 #include <linux/timekeeping.h>
29 #include <linux/uaccess.h>
30 #include <linux/workqueue.h>
32 #include <uapi/linux/gpio.h>
35 #include "gpiolib-cdev.h"
38 * Array sizes must ensure 64-bit alignment and not create holes in the struct packing.
41 static_assert(IS_ALIGNED(GPIO_V2_LINES_MAX, 2));
42 static_assert(IS_ALIGNED(GPIO_MAX_NAME_SIZE, 8));
45 * Check that uAPI structs are 64-bit aligned for 32/64-bit compatibility
47 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_attribute), 8));
48 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config_attribute), 8));
49 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config), 8));
50 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_request), 8));
51 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info), 8));
52 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info_changed), 8));
53 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_event), 8));
54 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));
56 /* Character device interface to GPIO.
58 * The GPIO character device, /dev/gpiochipN, provides userspace an
59 * interface to gpiolib GPIOs via ioctl()s.
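/*
 * Illustrative userspace sketch (not part of this driver) of querying chip
 * information through the character device. The device path and the lack of
 * error handling are assumptions for the example only.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/gpio.h>
 *
 *	struct gpiochip_info info;
 *	int fd = open("/dev/gpiochip0", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, GPIO_GET_CHIPINFO_IOCTL, &info) == 0)
 *		printf("%s: %u lines\n", info.name, info.lines);
 */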
63 * GPIO line handle management
66 #ifdef CONFIG_GPIO_CDEV_V1
68 * struct linehandle_state - contains the state of a userspace handle
69 * @gdev: the GPIO device the handle pertains to
70 * @label: consumer label used to tag descriptors
71 * @descs: the GPIO descriptors held by this handle
72 * @num_descs: the number of descriptors held in the descs array
74 struct linehandle_state {
75 struct gpio_device *gdev;
77 struct gpio_desc *descs[GPIOHANDLES_MAX];
81 #define GPIOHANDLE_REQUEST_VALID_FLAGS \
82 (GPIOHANDLE_REQUEST_INPUT | \
83 GPIOHANDLE_REQUEST_OUTPUT | \
84 GPIOHANDLE_REQUEST_ACTIVE_LOW | \
85 GPIOHANDLE_REQUEST_BIAS_PULL_UP | \
86 GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \
87 GPIOHANDLE_REQUEST_BIAS_DISABLE | \
88 GPIOHANDLE_REQUEST_OPEN_DRAIN | \
89 GPIOHANDLE_REQUEST_OPEN_SOURCE)
91 #define GPIOHANDLE_REQUEST_DIRECTION_FLAGS \
92 (GPIOHANDLE_REQUEST_INPUT | \
93 GPIOHANDLE_REQUEST_OUTPUT)
95 static int linehandle_validate_flags(u32 flags)
97 /* Return an error if an unknown flag is set */
98 if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
102 * Do not allow both INPUT & OUTPUT flags to be set as they are contradictory.
105 if ((flags & GPIOHANDLE_REQUEST_INPUT) &&
106 (flags & GPIOHANDLE_REQUEST_OUTPUT))
110 * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
111 * the hardware actually supports enabling both at the same time the
112 * electrical result would be disastrous.
114 if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
115 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
118 /* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
119 if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
120 ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
121 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
124 /* Bias flags only allowed for input or output mode. */
125 if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
126 (flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
127 ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
128 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
129 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
132 /* Only one bias flag can be set. */
133 if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
134 (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
135 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
136 ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
137 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
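/*
 * Illustrative examples of the checks above (flag prefixes shortened):
 *   INPUT | BIAS_PULL_UP                  - accepted
 *   INPUT | OUTPUT                        - rejected: direction conflict
 *   OUTPUT | OPEN_DRAIN | OPEN_SOURCE     - rejected: drive conflict
 *   ACTIVE_LOW | OPEN_DRAIN               - rejected: drive requires OUTPUT
 *   INPUT | BIAS_PULL_UP | BIAS_PULL_DOWN - rejected: only one bias allowed
 */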
143 static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
145 unsigned long flags = READ_ONCE(*flagsp);
147 assign_bit(FLAG_ACTIVE_LOW, &flags,
148 lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
149 assign_bit(FLAG_OPEN_DRAIN, &flags,
150 lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
151 assign_bit(FLAG_OPEN_SOURCE, &flags,
152 lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
153 assign_bit(FLAG_PULL_UP, &flags,
154 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
155 assign_bit(FLAG_PULL_DOWN, &flags,
156 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
157 assign_bit(FLAG_BIAS_DISABLE, &flags,
158 lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
160 WRITE_ONCE(*flagsp, flags);
163 static long linehandle_set_config(struct linehandle_state *lh,
166 struct gpiohandle_config gcnf;
167 struct gpio_desc *desc;
171 if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
175 ret = linehandle_validate_flags(lflags);
179 /* Lines must be reconfigured explicitly as input or output. */
180 if (!(lflags & GPIOHANDLE_REQUEST_DIRECTION_FLAGS))
183 for (i = 0; i < lh->num_descs; i++) {
185 linehandle_flags_to_desc_flags(lflags, &desc->flags);
187 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
188 int val = !!gcnf.default_values[i];
190 ret = gpiod_direction_output_nonotify(desc, val);
194 ret = gpiod_direction_input_nonotify(desc);
199 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
204 static long linehandle_ioctl(struct file *file, unsigned int cmd,
207 struct linehandle_state *lh = file->private_data;
208 void __user *ip = (void __user *)arg;
209 struct gpiohandle_data ghd;
210 DECLARE_BITMAP(vals, GPIOHANDLES_MAX);
214 guard(srcu)(&lh->gdev->srcu);
216 if (!rcu_access_pointer(lh->gdev->chip))
220 case GPIOHANDLE_GET_LINE_VALUES_IOCTL:
221 /* NOTE: It's okay to read values of output lines */
222 ret = gpiod_get_array_value_complex(false, true,
223 lh->num_descs, lh->descs,
228 memset(&ghd, 0, sizeof(ghd));
229 for (i = 0; i < lh->num_descs; i++)
230 ghd.values[i] = test_bit(i, vals);
232 if (copy_to_user(ip, &ghd, sizeof(ghd)))
236 case GPIOHANDLE_SET_LINE_VALUES_IOCTL:
238 * All line descriptors were created at once with the same
239 * flags so just check if the first one is really output.
241 if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
244 if (copy_from_user(&ghd, ip, sizeof(ghd)))
247 /* Clamp all values to [0,1] */
248 for (i = 0; i < lh->num_descs; i++)
249 __assign_bit(i, vals, ghd.values[i]);
251 /* Reuse the array setting function */
252 return gpiod_set_array_value_complex(false,
258 case GPIOHANDLE_SET_CONFIG_IOCTL:
259 return linehandle_set_config(lh, ip);
266 static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
269 return linehandle_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
273 static void linehandle_free(struct linehandle_state *lh)
277 for (i = 0; i < lh->num_descs; i++)
279 gpiod_free(lh->descs[i]);
281 gpio_device_put(lh->gdev);
285 static int linehandle_release(struct inode *inode, struct file *file)
287 linehandle_free(file->private_data);
291 static const struct file_operations linehandle_fileops = {
292 .release = linehandle_release,
293 .owner = THIS_MODULE,
294 .llseek = noop_llseek,
295 .unlocked_ioctl = linehandle_ioctl,
297 .compat_ioctl = linehandle_ioctl_compat,
301 static int linehandle_create(struct gpio_device *gdev, void __user *ip)
303 struct gpiohandle_request handlereq;
304 struct linehandle_state *lh;
309 if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
311 if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
314 lflags = handlereq.flags;
316 ret = linehandle_validate_flags(lflags);
320 lh = kzalloc(sizeof(*lh), GFP_KERNEL);
323 lh->gdev = gpio_device_get(gdev);
325 if (handlereq.consumer_label[0] != '\0') {
326 /* label is only initialized if consumer_label is set */
327 lh->label = kstrndup(handlereq.consumer_label,
328 sizeof(handlereq.consumer_label) - 1,
336 lh->num_descs = handlereq.lines;
338 /* Request each GPIO */
339 for (i = 0; i < handlereq.lines; i++) {
340 u32 offset = handlereq.lineoffsets[i];
341 struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);
348 ret = gpiod_request_user(desc, lh->label);
352 linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags);
354 ret = gpiod_set_transitory(desc, false);
359 * Lines have to be requested explicitly for input
360 * or output, else the line will be treated "as is".
362 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
363 int val = !!handlereq.default_values[i];
365 ret = gpiod_direction_output_nonotify(desc, val);
368 } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
369 ret = gpiod_direction_input_nonotify(desc);
374 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
376 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
380 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
386 file = anon_inode_getfile("gpio-linehandle",
389 O_RDONLY | O_CLOEXEC);
392 goto out_put_unused_fd;
396 if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
398 * fput() will trigger the release() callback, so do not go onto
399 * the regular error cleanup path here.
406 fd_install(fd, file);
408 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
419 #endif /* CONFIG_GPIO_CDEV_V1 */
422 * struct line - contains the state of a requested line
423 * @desc: the GPIO descriptor for this line.
424 * @req: the corresponding line request
425 * @irq: the interrupt triggered in response to events on this GPIO
426 * @edflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or
427 * GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied
428 * @timestamp_ns: cache for the timestamp storing it between hardirq and
429 * IRQ thread, used to bring the timestamp close to the actual event
430 * @req_seqno: the seqno for the current edge event in the sequence of
431 * events for the corresponding line request. This is drawn from the @req.
432 * @line_seqno: the seqno for the current edge event in the sequence of
433 * events for this line.
434 * @work: the worker that implements software debouncing
435 * @sw_debounced: flag indicating if the software debouncer is active
436 * @level: the current debounced physical level of the line
437 * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
438 * @raw_level: the line level at the time of event
439 * @total_discard_seq: the running counter of the discarded events
440 * @last_seqno: the last sequence number before debounce period expires
443 struct gpio_desc *desc;
445 * -- edge detector specific fields --
450 * The flags for the active edge detector configuration.
452 * edflags is set by linereq_create(), linereq_free(), and
453 * linereq_set_config(), which are themselves mutually
454 * exclusive, and is accessed by edge_irq_thread(),
455 * process_hw_ts_thread() and debounce_work_func(),
456 * which can all live with a slightly stale value.
460 * timestamp_ns and req_seqno are accessed only by
461 * edge_irq_handler() and edge_irq_thread(), which are themselves
462 * mutually exclusive, so no additional protection is necessary.
467 * line_seqno is accessed by either edge_irq_thread() or
468 * debounce_work_func(), which are themselves mutually exclusive,
469 * so no additional protection is necessary.
473 * -- debouncer specific fields --
475 struct delayed_work work;
477 * sw_debounce is accessed by linereq_set_config(), which is the
478 * only setter, and linereq_get_values(), which can live with a
479 * slightly stale value.
481 unsigned int sw_debounced;
483 * level is accessed by debounce_work_func(), which is the only
484 * setter, and linereq_get_values() which can live with a slightly stale value.
489 struct hte_ts_desc hdesc;
491 * The HTE provider sets the line level at the time of the event. The
492 * valid values are 0 or 1; a negative value indicates an error.
496 * When sw_debounce is set on an HTE-enabled line, this is the running
497 * counter of discarded events.
499 u32 total_discard_seq;
501 * When sw_debounce is set on an HTE-enabled line, this variable records
502 * the last sequence number before the debounce period expires.
505 #endif /* CONFIG_HTE */
509 * struct linereq - contains the state of a userspace line request
510 * @gdev: the GPIO device the line request pertains to
511 * @label: consumer label used to tag GPIO descriptors
512 * @num_lines: the number of lines in the lines array
513 * @wait: wait queue that handles blocking reads of events
514 * @device_unregistered_nb: notifier block for receiving gdev unregister events
515 * @event_buffer_size: the number of elements allocated in @events
516 * @events: KFIFO for the GPIO events
517 * @seqno: the sequence number for edge events generated on all lines in
518 * this line request. Note that this is not used when @num_lines is 1, as
519 * the line_seqno is then the same and is cheaper to calculate.
520 * @config_mutex: mutex for serializing ioctl() calls to ensure consistency
521 * of configuration, particularly multi-step accesses to desc flags.
522 * @lines: the lines held by this line request, with @num_lines elements.
525 struct gpio_device *gdev;
528 wait_queue_head_t wait;
529 struct notifier_block device_unregistered_nb;
530 u32 event_buffer_size;
531 DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event);
533 struct mutex config_mutex;
534 struct line lines[] __counted_by(num_lines);
537 #define GPIO_V2_LINE_BIAS_FLAGS \
538 (GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
539 GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
540 GPIO_V2_LINE_FLAG_BIAS_DISABLED)
542 #define GPIO_V2_LINE_DIRECTION_FLAGS \
543 (GPIO_V2_LINE_FLAG_INPUT | \
544 GPIO_V2_LINE_FLAG_OUTPUT)
546 #define GPIO_V2_LINE_DRIVE_FLAGS \
547 (GPIO_V2_LINE_FLAG_OPEN_DRAIN | \
548 GPIO_V2_LINE_FLAG_OPEN_SOURCE)
550 #define GPIO_V2_LINE_EDGE_FLAGS \
551 (GPIO_V2_LINE_FLAG_EDGE_RISING | \
552 GPIO_V2_LINE_FLAG_EDGE_FALLING)
554 #define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS
556 #define GPIO_V2_LINE_VALID_FLAGS \
557 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
558 GPIO_V2_LINE_DIRECTION_FLAGS | \
559 GPIO_V2_LINE_DRIVE_FLAGS | \
560 GPIO_V2_LINE_EDGE_FLAGS | \
561 GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
562 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
563 GPIO_V2_LINE_BIAS_FLAGS)
565 /* subset of flags relevant for edge detector configuration */
566 #define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \
567 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
568 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
569 GPIO_V2_LINE_EDGE_FLAGS)
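/*
 * Illustrative userspace sketch of a v2 line request with edge detection.
 * The offset, consumer name and chip fd ("chip_fd") are assumptions for
 * the example only.
 *
 *	struct gpio_v2_line_request req = { 0 };
 *
 *	req.offsets[0] = 17;
 *	req.num_lines = 1;
 *	strcpy(req.consumer, "example");
 *	req.config.flags = GPIO_V2_LINE_FLAG_INPUT |
 *			   GPIO_V2_LINE_FLAG_EDGE_RISING |
 *			   GPIO_V2_LINE_FLAG_EDGE_FALLING;
 *	ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req);
 *
 * On success, req.fd holds the file descriptor for the new line request.
 */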
571 static int linereq_unregistered_notify(struct notifier_block *nb,
572 unsigned long action, void *data)
574 struct linereq *lr = container_of(nb, struct linereq,
575 device_unregistered_nb);
577 wake_up_poll(&lr->wait, EPOLLIN | EPOLLERR);
582 static void linereq_put_event(struct linereq *lr,
583 struct gpio_v2_line_event *le)
585 bool overflow = false;
587 scoped_guard(spinlock, &lr->wait.lock) {
588 if (kfifo_is_full(&lr->events)) {
590 kfifo_skip(&lr->events);
592 kfifo_in(&lr->events, le, 1);
595 wake_up_poll(&lr->wait, EPOLLIN);
597 pr_debug_ratelimited("event FIFO is full - event dropped\n");
600 static u64 line_event_timestamp(struct line *line)
602 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
603 return ktime_get_real_ns();
604 else if (IS_ENABLED(CONFIG_HTE) &&
605 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
606 return line->timestamp_ns;
608 return ktime_get_ns();
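/*
 * Note: the default event clock is CLOCK_MONOTONIC (ktime_get_ns());
 * userspace opts into CLOCK_REALTIME or HTE timestamps per line via the
 * GPIO_V2_LINE_FLAG_EVENT_CLOCK_* flags checked above.
 */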
611 static u32 line_event_id(int level)
613 return level ? GPIO_V2_LINE_EVENT_RISING_EDGE :
614 GPIO_V2_LINE_EVENT_FALLING_EDGE;
617 static inline char *make_irq_label(const char *orig)
624 new = kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
626 return ERR_PTR(-ENOMEM);
631 static inline void free_irq_label(const char *label)
638 static enum hte_return process_hw_ts_thread(void *p)
642 struct gpio_v2_line_event le;
647 return HTE_CB_HANDLED;
652 memset(&le, 0, sizeof(le));
654 le.timestamp_ns = line->timestamp_ns;
655 edflags = READ_ONCE(line->edflags);
657 switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) {
658 case GPIO_V2_LINE_FLAG_EDGE_BOTH:
659 level = (line->raw_level >= 0) ?
661 gpiod_get_raw_value_cansleep(line->desc);
663 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
666 le.id = line_event_id(level);
668 case GPIO_V2_LINE_FLAG_EDGE_RISING:
669 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
671 case GPIO_V2_LINE_FLAG_EDGE_FALLING:
672 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
675 return HTE_CB_HANDLED;
677 le.line_seqno = line->line_seqno;
678 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
679 le.offset = gpio_chip_hwgpio(line->desc);
681 linereq_put_event(lr, &le);
683 return HTE_CB_HANDLED;
686 static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
693 return HTE_CB_HANDLED;
696 line->timestamp_ns = ts->tsc;
697 line->raw_level = ts->raw_level;
700 if (READ_ONCE(line->sw_debounced)) {
701 line->total_discard_seq++;
702 line->last_seqno = ts->seq;
703 mod_delayed_work(system_wq, &line->work,
704 usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
706 if (unlikely(ts->seq < line->line_seqno))
707 return HTE_CB_HANDLED;
709 diff_seqno = ts->seq - line->line_seqno;
710 line->line_seqno = ts->seq;
711 if (lr->num_lines != 1)
712 line->req_seqno = atomic_add_return(diff_seqno,
715 return HTE_RUN_SECOND_CB;
718 return HTE_CB_HANDLED;
721 static int hte_edge_setup(struct line *line, u64 eflags)
724 unsigned long flags = 0;
725 struct hte_ts_desc *hdesc = &line->hdesc;
727 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
728 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
729 HTE_FALLING_EDGE_TS :
731 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
732 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
736 line->total_discard_seq = 0;
738 hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL,
741 ret = hte_ts_get(NULL, hdesc, 0);
745 return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread,
751 static int hte_edge_setup(struct line *line, u64 eflags)
755 #endif /* CONFIG_HTE */
757 static irqreturn_t edge_irq_thread(int irq, void *p)
759 struct line *line = p;
760 struct linereq *lr = line->req;
761 struct gpio_v2_line_event le;
763 /* Do not leak kernel stack to userspace */
764 memset(&le, 0, sizeof(le));
766 if (line->timestamp_ns) {
767 le.timestamp_ns = line->timestamp_ns;
770 * We may be running from a nested threaded interrupt in
771 * which case we didn't get the timestamp from
772 * edge_irq_handler().
774 le.timestamp_ns = line_event_timestamp(line);
775 if (lr->num_lines != 1)
776 line->req_seqno = atomic_inc_return(&lr->seqno);
778 line->timestamp_ns = 0;
780 switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) {
781 case GPIO_V2_LINE_FLAG_EDGE_BOTH:
782 le.id = line_event_id(gpiod_get_value_cansleep(line->desc));
784 case GPIO_V2_LINE_FLAG_EDGE_RISING:
785 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
787 case GPIO_V2_LINE_FLAG_EDGE_FALLING:
788 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
794 le.line_seqno = line->line_seqno;
795 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
796 le.offset = gpio_chip_hwgpio(line->desc);
798 linereq_put_event(lr, &le);
803 static irqreturn_t edge_irq_handler(int irq, void *p)
805 struct line *line = p;
806 struct linereq *lr = line->req;
809 * Just store the timestamp in hardirq context so we get it as
810 * close in time as possible to the actual event.
812 line->timestamp_ns = line_event_timestamp(line);
814 if (lr->num_lines != 1)
815 line->req_seqno = atomic_inc_return(&lr->seqno);
817 return IRQ_WAKE_THREAD;
821 * returns the current debounced logical value.
823 static bool debounced_value(struct line *line)
828 * minor race - debouncer may be stopped here, so edge_detector_stop()
829 * must leave the value unchanged so the following will read the level
830 * from when the debouncer was last running.
832 value = READ_ONCE(line->level);
834 if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
840 static irqreturn_t debounce_irq_handler(int irq, void *p)
842 struct line *line = p;
844 mod_delayed_work(system_wq, &line->work,
845 usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
850 static void debounce_work_func(struct work_struct *work)
852 struct gpio_v2_line_event le;
853 struct line *line = container_of(work, struct line, work.work);
855 u64 eflags, edflags = READ_ONCE(line->edflags);
860 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
861 level = line->raw_level;
864 level = gpiod_get_raw_value_cansleep(line->desc);
866 pr_debug_ratelimited("debouncer failed to read line value\n");
870 if (READ_ONCE(line->level) == level)
873 WRITE_ONCE(line->level, level);
875 /* -- edge detection -- */
876 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
880 /* switch from physical level to logical - if they differ */
881 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
884 /* ignore edges that are not being monitored */
885 if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
886 ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
889 /* Do not leak kernel stack to userspace */
890 memset(&le, 0, sizeof(le));
893 le.timestamp_ns = line_event_timestamp(line);
894 le.offset = gpio_chip_hwgpio(line->desc);
896 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
897 /* discard events except the last one */
898 line->total_discard_seq -= 1;
899 diff_seqno = line->last_seqno - line->total_discard_seq -
901 line->line_seqno = line->last_seqno - line->total_discard_seq;
902 le.line_seqno = line->line_seqno;
903 le.seqno = (lr->num_lines == 1) ?
904 le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
906 #endif /* CONFIG_HTE */
909 le.line_seqno = line->line_seqno;
910 le.seqno = (lr->num_lines == 1) ?
911 le.line_seqno : atomic_inc_return(&lr->seqno);
914 le.id = line_event_id(level);
916 linereq_put_event(lr, &le);
919 static int debounce_setup(struct line *line, unsigned int debounce_period_us)
921 unsigned long irqflags;
926 * Try hardware. Skip gpiod_set_config() to avoid emitting two
927 * CHANGED_CONFIG line state events.
929 ret = gpio_do_set_config(line->desc,
930 pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE,
931 debounce_period_us));
932 if (ret != -ENOTSUPP)
935 if (debounce_period_us) {
936 /* setup software debounce */
937 level = gpiod_get_raw_value_cansleep(line->desc);
941 if (!(IS_ENABLED(CONFIG_HTE) &&
942 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
943 irq = gpiod_to_irq(line->desc);
947 label = make_irq_label(line->req->label);
951 irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
952 ret = request_irq(irq, debounce_irq_handler, irqflags,
955 free_irq_label(label);
960 ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
965 WRITE_ONCE(line->level, level);
966 WRITE_ONCE(line->sw_debounced, 1);
971 static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc,
972 unsigned int line_idx)
975 u64 mask = BIT_ULL(line_idx);
977 for (i = 0; i < lc->num_attrs; i++) {
978 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
979 (lc->attrs[i].mask & mask))
985 static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
986 unsigned int line_idx)
989 u64 mask = BIT_ULL(line_idx);
991 for (i = 0; i < lc->num_attrs; i++) {
992 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
993 (lc->attrs[i].mask & mask))
994 return lc->attrs[i].attr.debounce_period_us;
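/*
 * Illustrative userspace sketch of a config carrying a debounce attribute
 * for the first requested line (the values are assumptions for the example):
 *
 *	struct gpio_v2_line_config lc = { 0 };
 *
 *	lc.flags = GPIO_V2_LINE_FLAG_INPUT;
 *	lc.num_attrs = 1;
 *	lc.attrs[0].attr.id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
 *	lc.attrs[0].attr.debounce_period_us = 5000;
 *	lc.attrs[0].mask = 1ULL << 0;	(applies to offsets[0])
 */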
999 static void edge_detector_stop(struct line *line)
1002 free_irq_label(free_irq(line->irq, line));
1007 if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
1008 hte_ts_put(&line->hdesc);
1011 cancel_delayed_work_sync(&line->work);
1012 WRITE_ONCE(line->sw_debounced, 0);
1013 WRITE_ONCE(line->edflags, 0);
1015 WRITE_ONCE(line->desc->debounce_period_us, 0);
1016 /* do not change line->level - see comment in debounced_value() */
1019 static int edge_detector_fifo_init(struct linereq *req)
1021 if (kfifo_initialized(&req->events))
1024 return kfifo_alloc(&req->events, req->event_buffer_size, GFP_KERNEL);
1027 static int edge_detector_setup(struct line *line,
1028 struct gpio_v2_line_config *lc,
1029 unsigned int line_idx, u64 edflags)
1031 u32 debounce_period_us;
1032 unsigned long irqflags = 0;
1037 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
1039 ret = edge_detector_fifo_init(line->req);
1043 if (gpio_v2_line_config_debounced(lc, line_idx)) {
1044 debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
1045 ret = debounce_setup(line, debounce_period_us);
1048 WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
1051 /* detection disabled or sw debouncer will provide edge detection */
1052 if (!eflags || READ_ONCE(line->sw_debounced))
1055 if (IS_ENABLED(CONFIG_HTE) &&
1056 (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1057 return hte_edge_setup(line, edflags);
1059 irq = gpiod_to_irq(line->desc);
1063 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
1064 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
1065 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
1066 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
1067 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
1068 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
1069 irqflags |= IRQF_ONESHOT;
1071 label = make_irq_label(line->req->label);
1073 return PTR_ERR(label);
1075 /* Request a thread to read the events */
1076 ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
1077 irqflags, label, line);
1079 free_irq_label(label);
1087 static int edge_detector_update(struct line *line,
1088 struct gpio_v2_line_config *lc,
1089 unsigned int line_idx, u64 edflags)
1091 u64 active_edflags = READ_ONCE(line->edflags);
1092 unsigned int debounce_period_us =
1093 gpio_v2_line_config_debounce_period(lc, line_idx);
1095 if ((active_edflags == edflags) &&
1096 (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
1099 /* sw debounced and still will be...*/
1100 if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
1101 WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
1103 * ensure event fifo is initialised if edge detection is now enabled.
1106 if (edflags & GPIO_V2_LINE_EDGE_FLAGS)
1107 return edge_detector_fifo_init(line->req);
1112 /* reconfiguring edge detection or sw debounce being disabled */
1113 if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
1114 (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) ||
1115 (!debounce_period_us && READ_ONCE(line->sw_debounced)))
1116 edge_detector_stop(line);
1118 return edge_detector_setup(line, lc, line_idx, edflags);
1121 static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
1122 unsigned int line_idx)
1125 u64 mask = BIT_ULL(line_idx);
1127 for (i = 0; i < lc->num_attrs; i++) {
1128 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) &&
1129 (lc->attrs[i].mask & mask))
1130 return lc->attrs[i].attr.flags;
1135 static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc,
1136 unsigned int line_idx)
1139 u64 mask = BIT_ULL(line_idx);
1141 for (i = 0; i < lc->num_attrs; i++) {
1142 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) &&
1143 (lc->attrs[i].mask & mask))
1144 return !!(lc->attrs[i].attr.values & mask);
1149 static int gpio_v2_line_flags_validate(u64 flags)
1151 /* Return an error if an unknown flag is set */
1152 if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
1155 if (!IS_ENABLED(CONFIG_HTE) &&
1156 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1160 * Do not allow both INPUT and OUTPUT flags to be set as they are contradictory.
1163 if ((flags & GPIO_V2_LINE_FLAG_INPUT) &&
1164 (flags & GPIO_V2_LINE_FLAG_OUTPUT))
1167 /* Only allow one event clock source */
1168 if (IS_ENABLED(CONFIG_HTE) &&
1169 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
1170 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1173 /* Edge detection requires explicit input. */
1174 if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
1175 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1179 * Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single
1180 * request. If the hardware actually supports enabling both at the
1181 * same time the electrical result would be disastrous.
1183 if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) &&
1184 (flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE))
1187 /* Drive requires explicit output direction. */
1188 if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) &&
1189 !(flags & GPIO_V2_LINE_FLAG_OUTPUT))
1192 /* Bias requires explicit direction. */
1193 if ((flags & GPIO_V2_LINE_BIAS_FLAGS) &&
1194 !(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
1197 /* Only one bias flag can be set. */
1198 if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) &&
1199 (flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN |
1200 GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) ||
1201 ((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) &&
1202 (flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)))
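/*
 * Illustrative examples of the checks above (flag prefixes shortened):
 *   INPUT | EDGE_RISING                     - accepted
 *   EDGE_RISING (without INPUT)             - rejected: edges need input
 *   OUTPUT | OPEN_DRAIN                     - accepted
 *   EVENT_CLOCK_REALTIME | EVENT_CLOCK_HTE  - rejected: only one event clock
 *   BIAS_PULL_UP (without a direction)      - rejected: bias needs direction
 */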
1208 static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
1209 unsigned int num_lines)
1215 if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
1218 if (!mem_is_zero(lc->padding, sizeof(lc->padding)))
1221 for (i = 0; i < num_lines; i++) {
1222 flags = gpio_v2_line_config_flags(lc, i);
1223 ret = gpio_v2_line_flags_validate(flags);
1227 /* debounce requires explicit input */
1228 if (gpio_v2_line_config_debounced(lc, i) &&
1229 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1235 static void gpio_v2_line_config_flags_to_desc_flags(u64 lflags,
1236 unsigned long *flagsp)
1238 unsigned long flags = READ_ONCE(*flagsp);
1240 assign_bit(FLAG_ACTIVE_LOW, &flags,
1241 lflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);
1243 if (lflags & GPIO_V2_LINE_FLAG_OUTPUT)
1244 set_bit(FLAG_IS_OUT, &flags);
1245 else if (lflags & GPIO_V2_LINE_FLAG_INPUT)
1246 clear_bit(FLAG_IS_OUT, &flags);
1248 assign_bit(FLAG_EDGE_RISING, &flags,
1249 lflags & GPIO_V2_LINE_FLAG_EDGE_RISING);
1250 assign_bit(FLAG_EDGE_FALLING, &flags,
1251 lflags & GPIO_V2_LINE_FLAG_EDGE_FALLING);
1253 assign_bit(FLAG_OPEN_DRAIN, &flags,
1254 lflags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
1255 assign_bit(FLAG_OPEN_SOURCE, &flags,
1256 lflags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);
1258 assign_bit(FLAG_PULL_UP, &flags,
1259 lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
1260 assign_bit(FLAG_PULL_DOWN, &flags,
1261 lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
1262 assign_bit(FLAG_BIAS_DISABLE, &flags,
1263 lflags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
1265 assign_bit(FLAG_EVENT_CLOCK_REALTIME, &flags,
1266 lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
1267 assign_bit(FLAG_EVENT_CLOCK_HTE, &flags,
1268 lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
1270 WRITE_ONCE(*flagsp, flags);
1273 static long linereq_get_values(struct linereq *lr, void __user *ip)
1275 struct gpio_v2_line_values lv;
1276 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1277 struct gpio_desc **descs;
1278 unsigned int i, didx, num_get;
1282 /* NOTE: It's ok to read values of output lines. */
1283 if (copy_from_user(&lv, ip, sizeof(lv)))
1287 * gpiod_get_array_value_complex() requires compacted desc and val
1288 * arrays, rather than the sparse ones in lv.
1289 * Calculation of num_get and construction of the desc array is
1290 * optimized to avoid allocation for the desc array for the common
1291 * num_get == 1 case.
1293 /* scan requested lines to calculate the subset to get */
1294 for (num_get = 0, i = 0; i < lr->num_lines; i++) {
1295 if (lv.mask & BIT_ULL(i)) {
1297 /* capture desc for the num_get == 1 case */
1298 descs = &lr->lines[i].desc;
1306 /* build compacted desc array */
1307 descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL);
1310 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1311 if (lv.mask & BIT_ULL(i)) {
1312 descs[didx] = lr->lines[i].desc;
1317 ret = gpiod_get_array_value_complex(false, true, num_get,
1326 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1327 /* unpack compacted vals for the response */
1328 if (lv.mask & BIT_ULL(i)) {
1329 if (lr->lines[i].sw_debounced)
1330 val = debounced_value(&lr->lines[i]);
1332 val = test_bit(didx, vals);
1334 lv.bits |= BIT_ULL(i);
1339 if (copy_to_user(ip, &lv, sizeof(lv)))
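/*
 * Illustrative userspace sketch of reading values through a line request fd
 * ("req_fd" is an assumption for the example); bit i of mask/bits refers to
 * offsets[i] of the original request:
 *
 *	struct gpio_v2_line_values lv = { .mask = 1ULL << 0 };
 *
 *	if (ioctl(req_fd, GPIO_V2_LINE_GET_VALUES_IOCTL, &lv) == 0)
 *		printf("line 0 is %d\n", (int)(lv.bits & 1));
 */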
1345 static long linereq_set_values(struct linereq *lr, void __user *ip)
1347 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1348 struct gpio_v2_line_values lv;
1349 struct gpio_desc **descs;
1350 unsigned int i, didx, num_set;
1353 if (copy_from_user(&lv, ip, sizeof(lv)))
1356 guard(mutex)(&lr->config_mutex);
1359 * gpiod_set_array_value_complex() requires compacted desc and val
1360 * arrays, rather than the sparse ones in lv.
1361 * Calculation of num_set and construction of the descs and vals arrays
1362 * is optimized to minimize scanning the lv->mask, and to avoid
1363 * allocation for the desc array for the common num_set == 1 case.
1365 bitmap_zero(vals, GPIO_V2_LINES_MAX);
1366 /* scan requested lines to determine the subset to be set */
1367 for (num_set = 0, i = 0; i < lr->num_lines; i++) {
1368 if (lv.mask & BIT_ULL(i)) {
1369 /* setting inputs is not allowed */
1370 if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
1372 /* add to compacted values */
1373 if (lv.bits & BIT_ULL(i))
1374 __set_bit(num_set, vals);
1376 /* capture desc for the num_set == 1 case */
1377 descs = &lr->lines[i].desc;
1384 /* build compacted desc array */
1385 descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL);
1388 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1389 if (lv.mask & BIT_ULL(i)) {
1390 descs[didx] = lr->lines[i].desc;
1395 ret = gpiod_set_array_value_complex(false, true, num_set,
1403 static long linereq_set_config(struct linereq *lr, void __user *ip)
1405 struct gpio_v2_line_config lc;
1406 struct gpio_desc *desc;
1412 if (copy_from_user(&lc, ip, sizeof(lc)))
1415 ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
1419 guard(mutex)(&lr->config_mutex);
1421 for (i = 0; i < lr->num_lines; i++) {
1422 line = &lr->lines[i];
1423 desc = lr->lines[i].desc;
1424 flags = gpio_v2_line_config_flags(&lc, i);
1426 * Lines not explicitly reconfigured as input or output
1427 * are left unchanged.
1429 if (!(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
1431 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1432 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
1433 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1434 int val = gpio_v2_line_config_output_value(&lc, i);
1436 edge_detector_stop(line);
1437 ret = gpiod_direction_output_nonotify(desc, val);
1441 ret = gpiod_direction_input_nonotify(desc);
1445 ret = edge_detector_update(line, &lc, i, edflags);
1450 WRITE_ONCE(line->edflags, edflags);
1452 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
1457 static long linereq_ioctl(struct file *file, unsigned int cmd,
1460 struct linereq *lr = file->private_data;
1461 void __user *ip = (void __user *)arg;
1463 guard(srcu)(&lr->gdev->srcu);
1465 if (!rcu_access_pointer(lr->gdev->chip))
1469 case GPIO_V2_LINE_GET_VALUES_IOCTL:
1470 return linereq_get_values(lr, ip);
1471 case GPIO_V2_LINE_SET_VALUES_IOCTL:
1472 return linereq_set_values(lr, ip);
1473 case GPIO_V2_LINE_SET_CONFIG_IOCTL:
1474 return linereq_set_config(lr, ip);
1480 #ifdef CONFIG_COMPAT
1481 static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
1484 return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1488 static __poll_t linereq_poll(struct file *file,
1489 struct poll_table_struct *wait)
1491 struct linereq *lr = file->private_data;
1492 __poll_t events = 0;
1494 guard(srcu)(&lr->gdev->srcu);
1496 if (!rcu_access_pointer(lr->gdev->chip))
1497 return EPOLLHUP | EPOLLERR;
1499 poll_wait(file, &lr->wait, wait);
1501 if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
1503 events = EPOLLIN | EPOLLRDNORM;
1508 static ssize_t linereq_read(struct file *file, char __user *buf,
1509 size_t count, loff_t *f_ps)
1511 struct linereq *lr = file->private_data;
1512 struct gpio_v2_line_event le;
1513 ssize_t bytes_read = 0;
1516 guard(srcu)(&lr->gdev->srcu);
1518 if (!rcu_access_pointer(lr->gdev->chip))
1521 if (count < sizeof(le))
1525 scoped_guard(spinlock, &lr->wait.lock) {
1526 if (kfifo_is_empty(&lr->events)) {
1530 if (file->f_flags & O_NONBLOCK)
1533 ret = wait_event_interruptible_locked(lr->wait,
1534 !kfifo_is_empty(&lr->events));
1539 if (kfifo_out(&lr->events, &le, 1) != 1) {
1541 * This should never happen - we hold the
1542 * lock from the moment we learned the fifo
1543 * is no longer empty until now.
1545 WARN(1, "failed to read from non-empty kfifo");
1550 if (copy_to_user(buf + bytes_read, &le, sizeof(le)))
1552 bytes_read += sizeof(le);
1553 } while (count >= bytes_read + sizeof(le));
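/*
 * Illustrative userspace sketch of consuming edge events from the line
 * request fd ("req_fd" is an assumption for the example):
 *
 *	struct gpio_v2_line_event ev;
 *
 *	if (read(req_fd, &ev, sizeof(ev)) == sizeof(ev))
 *		printf("offset %u: %s edge at %llu ns\n", ev.offset,
 *		       ev.id == GPIO_V2_LINE_EVENT_RISING_EDGE ?
 *				"rising" : "falling",
 *		       (unsigned long long)ev.timestamp_ns);
 */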
1558 static void linereq_free(struct linereq *lr)
1562 if (lr->device_unregistered_nb.notifier_call)
1563 blocking_notifier_chain_unregister(&lr->gdev->device_notifier,
1564 &lr->device_unregistered_nb);
1566 for (i = 0; i < lr->num_lines; i++) {
1567 if (lr->lines[i].desc) {
1568 edge_detector_stop(&lr->lines[i]);
1569 gpiod_free(lr->lines[i].desc);
1572 kfifo_free(&lr->events);
1574 gpio_device_put(lr->gdev);
1578 static int linereq_release(struct inode *inode, struct file *file)
1580 struct linereq *lr = file->private_data;
1586 #ifdef CONFIG_PROC_FS
1587 static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
1589 struct linereq *lr = file->private_data;
1590 struct device *dev = &lr->gdev->dev;
1593 seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev));
1595 for (i = 0; i < lr->num_lines; i++)
1596 seq_printf(out, "gpio-line:\t%d\n",
1597 gpio_chip_hwgpio(lr->lines[i].desc));
1601 static const struct file_operations line_fileops = {
1602 .release = linereq_release,
1603 .read = linereq_read,
1604 .poll = linereq_poll,
1605 .owner = THIS_MODULE,
1606 .llseek = noop_llseek,
1607 .unlocked_ioctl = linereq_ioctl,
1608 #ifdef CONFIG_COMPAT
1609 .compat_ioctl = linereq_ioctl_compat,
1611 #ifdef CONFIG_PROC_FS
1612 .show_fdinfo = linereq_show_fdinfo,
1616 static int linereq_create(struct gpio_device *gdev, void __user *ip)
1618 struct gpio_v2_line_request ulr;
1619 struct gpio_v2_line_config *lc;
1626 if (copy_from_user(&ulr, ip, sizeof(ulr)))
1629 if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
1632 if (!mem_is_zero(ulr.padding, sizeof(ulr.padding)))
1636 ret = gpio_v2_line_config_validate(lc, ulr.num_lines);
1640 lr = kvzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
1643 lr->num_lines = ulr.num_lines;
1645 lr->gdev = gpio_device_get(gdev);
1647 for (i = 0; i < ulr.num_lines; i++) {
1648 lr->lines[i].req = lr;
1649 WRITE_ONCE(lr->lines[i].sw_debounced, 0);
1650 INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func);
1653 if (ulr.consumer[0] != '\0') {
1654 /* label is only initialized if consumer is set */
1655 lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1,
1659 goto out_free_linereq;
1663 mutex_init(&lr->config_mutex);
1664 init_waitqueue_head(&lr->wait);
1665 INIT_KFIFO(lr->events);
1666 lr->event_buffer_size = ulr.event_buffer_size;
1667 if (lr->event_buffer_size == 0)
1668 lr->event_buffer_size = ulr.num_lines * 16;
1669 else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16)
1670 lr->event_buffer_size = GPIO_V2_LINES_MAX * 16;
1672 atomic_set(&lr->seqno, 0);
1674 /* Request each GPIO */
1675 for (i = 0; i < ulr.num_lines; i++) {
1676 u32 offset = ulr.offsets[i];
1677 struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);
1680 ret = PTR_ERR(desc);
1681 goto out_free_linereq;
1684 ret = gpiod_request_user(desc, lr->label);
1686 goto out_free_linereq;
1688 lr->lines[i].desc = desc;
1689 flags = gpio_v2_line_config_flags(lc, i);
1690 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1692 ret = gpiod_set_transitory(desc, false);
1694 goto out_free_linereq;
1696 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
1698 * Lines have to be requested explicitly for input
1699 * or output, else the line will be treated "as is".
1701 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1702 int val = gpio_v2_line_config_output_value(lc, i);
1704 ret = gpiod_direction_output_nonotify(desc, val);
1706 goto out_free_linereq;
1707 } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
1708 ret = gpiod_direction_input_nonotify(desc);
1710 goto out_free_linereq;
1712 ret = edge_detector_setup(&lr->lines[i], lc, i,
1715 goto out_free_linereq;
1718 lr->lines[i].edflags = edflags;
1720 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
1722 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
1726 lr->device_unregistered_nb.notifier_call = linereq_unregistered_notify;
1727 ret = blocking_notifier_chain_register(&gdev->device_notifier,
1728 &lr->device_unregistered_nb);
1730 goto out_free_linereq;
1732 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
1735 goto out_free_linereq;
1738 file = anon_inode_getfile("gpio-line", &line_fileops, lr,
1739 O_RDONLY | O_CLOEXEC);
1741 ret = PTR_ERR(file);
1742 goto out_put_unused_fd;
1746 if (copy_to_user(ip, &ulr, sizeof(ulr))) {
1748 * fput() will trigger the release() callback, so do not go onto
1749 * the regular error cleanup path here.
1756 fd_install(fd, file);
1758 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
1770 #ifdef CONFIG_GPIO_CDEV_V1
1773 * GPIO line event management
1777 * struct lineevent_state - contains the state of a userspace event
1778 * @gdev: the GPIO device the event pertains to
1779 * @label: consumer label used to tag descriptors
1780 * @desc: the GPIO descriptor held by this event
1781 * @eflags: the event flags this line was requested with
1782 * @irq: the interrupt that triggers in response to events on this GPIO
1783 * @wait: wait queue that handles blocking reads of events
1784 * @device_unregistered_nb: notifier block for receiving gdev unregister events
1785 * @events: KFIFO for the GPIO events
1786 * @timestamp: cache for the timestamp storing it between hardirq
1787 * and IRQ thread, used to bring the timestamp close to the actual event
1790 struct lineevent_state {
1791 struct gpio_device *gdev;
1793 struct gpio_desc *desc;
1796 wait_queue_head_t wait;
1797 struct notifier_block device_unregistered_nb;
1798 DECLARE_KFIFO(events, struct gpioevent_data, 16);
1802 #define GPIOEVENT_REQUEST_VALID_FLAGS \
1803 (GPIOEVENT_REQUEST_RISING_EDGE | \
1804 GPIOEVENT_REQUEST_FALLING_EDGE)
1806 static __poll_t lineevent_poll(struct file *file,
1807 struct poll_table_struct *wait)
1809 struct lineevent_state *le = file->private_data;
1810 __poll_t events = 0;
1812 guard(srcu)(&le->gdev->srcu);
1814 if (!rcu_access_pointer(le->gdev->chip))
1815 return EPOLLHUP | EPOLLERR;
1817 poll_wait(file, &le->wait, wait);
1819 if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
1820 events = EPOLLIN | EPOLLRDNORM;
1825 static int lineevent_unregistered_notify(struct notifier_block *nb,
1826 unsigned long action, void *data)
1828 struct lineevent_state *le = container_of(nb, struct lineevent_state,
1829 device_unregistered_nb);
1831 wake_up_poll(&le->wait, EPOLLIN | EPOLLERR);
1836 struct compat_gpioeevent_data {
1837 compat_u64 timestamp;
1841 static ssize_t lineevent_read(struct file *file, char __user *buf,
1842 size_t count, loff_t *f_ps)
1844 struct lineevent_state *le = file->private_data;
1845 struct gpioevent_data ge;
1846 ssize_t bytes_read = 0;
1850 guard(srcu)(&le->gdev->srcu);
1852 if (!rcu_access_pointer(le->gdev->chip))
1856 * When the compat system call is used, struct gpioevent_data has a
1857 * different size on at least ia32 due to alignment differences.
1858 * Because the first member is 64 bits followed by a 32-bit one,
1859 * there is no gap between them. The only difference is the
1860 * padding at the end of the data structure. Hence, we calculate the
1861 * actual sizeof() and pass this as an argument to copy_to_user() to
1862 * drop the unneeded bytes from the output.
1864 if (compat_need_64bit_alignment_fixup())
1865 ge_size = sizeof(struct compat_gpioeevent_data);
1867 ge_size = sizeof(struct gpioevent_data);
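/*
 * For example, on x86-64 sizeof(struct gpioevent_data) is 16 (the trailing
 * u32 is padded up to the u64 alignment), while an ia32 process expects the
 * 12-byte layout reproduced by struct compat_gpioeevent_data above.
 */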
1868 if (count < ge_size)
1872 scoped_guard(spinlock, &le->wait.lock) {
1873 if (kfifo_is_empty(&le->events)) {
1877 if (file->f_flags & O_NONBLOCK)
1880 ret = wait_event_interruptible_locked(le->wait,
1881 !kfifo_is_empty(&le->events));
1886 if (kfifo_out(&le->events, &ge, 1) != 1) {
1888 * This should never happen - we hold the
1889 * lock from the moment we learned the fifo
1890 * is no longer empty until now.
1892 WARN(1, "failed to read from non-empty kfifo");
1897 if (copy_to_user(buf + bytes_read, &ge, ge_size))
1899 bytes_read += ge_size;
1900 } while (count >= bytes_read + ge_size);
1905 static void lineevent_free(struct lineevent_state *le)
1907 if (le->device_unregistered_nb.notifier_call)
1908 blocking_notifier_chain_unregister(&le->gdev->device_notifier,
1909 &le->device_unregistered_nb);
1911 free_irq_label(free_irq(le->irq, le));
1913 gpiod_free(le->desc);
1915 gpio_device_put(le->gdev);
1919 static int lineevent_release(struct inode *inode, struct file *file)
1921 lineevent_free(file->private_data);
1925 static long lineevent_ioctl(struct file *file, unsigned int cmd,
1928 struct lineevent_state *le = file->private_data;
1929 void __user *ip = (void __user *)arg;
1930 struct gpiohandle_data ghd;
1932 guard(srcu)(&le->gdev->srcu);
1934 if (!rcu_access_pointer(le->gdev->chip))
1938 * We can get the value for an event line but not set it,
1939 * because it is input by definition.
1941 if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
1944 memset(&ghd, 0, sizeof(ghd));
1946 val = gpiod_get_value_cansleep(le->desc);
1949 ghd.values[0] = val;
1951 if (copy_to_user(ip, &ghd, sizeof(ghd)))
1959 #ifdef CONFIG_COMPAT
1960 static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
1963 return lineevent_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1967 static const struct file_operations lineevent_fileops = {
1968 .release = lineevent_release,
1969 .read = lineevent_read,
1970 .poll = lineevent_poll,
1971 .owner = THIS_MODULE,
1972 .llseek = noop_llseek,
1973 .unlocked_ioctl = lineevent_ioctl,
1974 #ifdef CONFIG_COMPAT
1975 .compat_ioctl = lineevent_ioctl_compat,
1979 static irqreturn_t lineevent_irq_thread(int irq, void *p)
1981 struct lineevent_state *le = p;
1982 struct gpioevent_data ge;
1985 /* Do not leak kernel stack to userspace */
1986 memset(&ge, 0, sizeof(ge));
1989 * We may be running from a nested threaded interrupt in which case
1990 * we didn't get the timestamp from lineevent_irq_handler().
1993 ge.timestamp = ktime_get_ns();
1995 ge.timestamp = le->timestamp;
1997 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
1998 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
1999 int level = gpiod_get_value_cansleep(le->desc);
2002 /* Emit low-to-high event */
2003 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
2005 /* Emit high-to-low event */
2006 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
2007 } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
2008 /* Emit low-to-high event */
2009 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
2010 } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
2011 /* Emit high-to-low event */
2012 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
2017 ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
2020 wake_up_poll(&le->wait, EPOLLIN);
2022 pr_debug_ratelimited("event FIFO is full - event dropped\n");
2027 static irqreturn_t lineevent_irq_handler(int irq, void *p)
2029 struct lineevent_state *le = p;
2032 * Just store the timestamp in hardirq context so we get it as
2033 * close in time as possible to the actual event.
2035 le->timestamp = ktime_get_ns();
2037 return IRQ_WAKE_THREAD;
2040 static int lineevent_create(struct gpio_device *gdev, void __user *ip)
2042 struct gpioevent_request eventreq;
2043 struct lineevent_state *le;
2044 struct gpio_desc *desc;
2051 int irq, irqflags = 0;
2054 if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
2057 offset = eventreq.lineoffset;
2058 lflags = eventreq.handleflags;
2059 eflags = eventreq.eventflags;
2061 desc = gpio_device_get_desc(gdev, offset);
2063 return PTR_ERR(desc);
2065 /* Return an error if an unknown flag is set */
2066 if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
2067 (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
2070 /* This is just wrong: we don't look for events on output lines */
2071 if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
2072 (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
2073 (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
2076 /* Only one bias flag can be set. */
2077 if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
2078 (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
2079 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
2080 ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
2081 (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
2084 le = kzalloc(sizeof(*le), GFP_KERNEL);
2087 le->gdev = gpio_device_get(gdev);
2089 if (eventreq.consumer_label[0] != '\0') {
2090 /* label is only initialized if consumer_label is set */
2091 le->label = kstrndup(eventreq.consumer_label,
2092 sizeof(eventreq.consumer_label) - 1,
2100 ret = gpiod_request_user(desc, le->label);
2104 le->eflags = eflags;
2106 linehandle_flags_to_desc_flags(lflags, &desc->flags);
2108 ret = gpiod_direction_input(desc);
2112 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
2114 irq = gpiod_to_irq(desc);
2120 if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
2121 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
2122 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
2123 if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
2124 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
2125 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
2126 irqflags |= IRQF_ONESHOT;
2128 INIT_KFIFO(le->events);
2129 init_waitqueue_head(&le->wait);
2131 le->device_unregistered_nb.notifier_call = lineevent_unregistered_notify;
2132 ret = blocking_notifier_chain_register(&gdev->device_notifier,
2133 &le->device_unregistered_nb);
2137 label = make_irq_label(le->label);
2138 if (IS_ERR(label)) {
2139 ret = PTR_ERR(label);
2143 /* Request a thread to read the events */
2144 ret = request_threaded_irq(irq,
2145 lineevent_irq_handler,
2146 lineevent_irq_thread,
2151 free_irq_label(label);
2157 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
2163 file = anon_inode_getfile("gpio-event",
2166 O_RDONLY | O_CLOEXEC);
2168 ret = PTR_ERR(file);
2169 goto out_put_unused_fd;
2173 if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
2175 * fput() will trigger the release() callback, so do not go onto
2176 * the regular error cleanup path here.
2183 fd_install(fd, file);
2194 static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2,
2195 struct gpioline_info *info_v1)
2197 u64 flagsv2 = info_v2->flags;
2199 memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name));
2200 memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer));
2201 info_v1->line_offset = info_v2->offset;
2204 if (flagsv2 & GPIO_V2_LINE_FLAG_USED)
2205 info_v1->flags |= GPIOLINE_FLAG_KERNEL;
2207 if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT)
2208 info_v1->flags |= GPIOLINE_FLAG_IS_OUT;
2210 if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
2211 info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW;
2213 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN)
2214 info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN;
2215 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE)
2216 info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE;
2218 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)
2219 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
2220 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN)
2221 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
2222 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED)
2223 info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
2226 static void gpio_v2_line_info_changed_to_v1(
2227 struct gpio_v2_line_info_changed *lic_v2,
2228 struct gpioline_info_changed *lic_v1)
2230 memset(lic_v1, 0, sizeof(*lic_v1));
2231 gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
2232 lic_v1->timestamp = lic_v2->timestamp_ns;
2233 lic_v1->event_type = lic_v2->event_type;
2236 #endif /* CONFIG_GPIO_CDEV_V1 */
2238 static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
2239 struct gpio_v2_line_info *info, bool atomic)
2241 u32 debounce_period_us;
2242 unsigned long dflags;
2245 CLASS(gpio_chip_guard, guard)(desc);
2249 memset(info, 0, sizeof(*info));
2250 info->offset = gpio_chip_hwgpio(desc);
2253 strscpy(info->name, desc->name, sizeof(info->name));
2255 dflags = READ_ONCE(desc->flags);
2257 scoped_guard(srcu, &desc->gdev->desc_srcu) {
2258 label = gpiod_get_label(desc);
2259 if (label && test_bit(FLAG_REQUESTED, &dflags))
2260 strscpy(info->consumer, label,
2261 sizeof(info->consumer));
2265 * Userspace only needs to know that the kernel is using this GPIO so it can't use it.
2267 * The calculation of the used flag is slightly racy, as it may read
2268 * desc, gc and pinctrl state without a lock covering all three at
2269 * once. Worst case if the line is in transition and the calculation
2270 * is inconsistent then it looks to the user like they performed the
2271 * read on the other side of the transition - but that can always happen.
2273 * The definitive test that a line is available to userspace is to request it.
2276 if (test_bit(FLAG_REQUESTED, &dflags) ||
2277 test_bit(FLAG_IS_HOGGED, &dflags) ||
2278 test_bit(FLAG_EXPORT, &dflags) ||
2279 test_bit(FLAG_SYSFS, &dflags) ||
2280 !gpiochip_line_is_valid(guard.gc, info->offset)) {
2281 info->flags |= GPIO_V2_LINE_FLAG_USED;
2282 } else if (!atomic) {
2283 if (!pinctrl_gpio_can_use_line(guard.gc, info->offset))
2284 info->flags |= GPIO_V2_LINE_FLAG_USED;
2287 if (test_bit(FLAG_IS_OUT, &dflags))
2288 info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
2290 info->flags |= GPIO_V2_LINE_FLAG_INPUT;
2292 if (test_bit(FLAG_ACTIVE_LOW, &dflags))
2293 info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;
2295 if (test_bit(FLAG_OPEN_DRAIN, &dflags))
2296 info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
2297 if (test_bit(FLAG_OPEN_SOURCE, &dflags))
2298 info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;
2300 if (test_bit(FLAG_BIAS_DISABLE, &dflags))
2301 info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
2302 if (test_bit(FLAG_PULL_DOWN, &dflags))
2303 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
2304 if (test_bit(FLAG_PULL_UP, &dflags))
2305 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
2307 if (test_bit(FLAG_EDGE_RISING, &dflags))
2308 info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
2309 if (test_bit(FLAG_EDGE_FALLING, &dflags))
2310 info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
2312 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &dflags))
2313 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
2314 else if (test_bit(FLAG_EVENT_CLOCK_HTE, &dflags))
2315 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
2317 debounce_period_us = READ_ONCE(desc->debounce_period_us);
2318 if (debounce_period_us) {
2319 info->attrs[info->num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
2320 info->attrs[info->num_attrs].debounce_period_us =
2326 struct gpio_chardev_data {
2327 struct gpio_device *gdev;
2328 wait_queue_head_t wait;
2329 DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
2330 struct notifier_block lineinfo_changed_nb;
2331 struct notifier_block device_unregistered_nb;
2332 unsigned long *watched_lines;
2333 #ifdef CONFIG_GPIO_CDEV_V1
2334 atomic_t watch_abi_version;
2339 static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
2341 struct gpio_device *gdev = cdev->gdev;
2342 struct gpiochip_info chipinfo;
2344 memset(&chipinfo, 0, sizeof(chipinfo));
2346 strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
2347 strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
2348 chipinfo.lines = gdev->ngpio;
2349 if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
2354 #ifdef CONFIG_GPIO_CDEV_V1
2356 * returns 0 if the versions match, else the previously selected ABI version
2358 static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
2359 unsigned int version)
2361 int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version);
2363 if (abiv == version)
2369 static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
2372 struct gpio_desc *desc;
2373 struct gpioline_info lineinfo;
2374 struct gpio_v2_line_info lineinfo_v2;
2376 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2379 /* this doubles as a range check on line_offset */
2380 desc = gpio_device_get_desc(cdev->gdev, lineinfo.line_offset);
2382 return PTR_ERR(desc);
2385 if (lineinfo_ensure_abi_version(cdev, 1))
2388 if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
2392 gpio_desc_to_lineinfo(desc, &lineinfo_v2, false);
2393 gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
2395 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2397 clear_bit(lineinfo.line_offset, cdev->watched_lines);
static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
			bool watch)
{
	struct gpio_desc *desc;
	struct gpio_v2_line_info lineinfo;

	if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
		return -EFAULT;

	if (!mem_is_zero(lineinfo.padding, sizeof(lineinfo.padding)))
		return -EINVAL;

	desc = gpio_device_get_desc(cdev->gdev, lineinfo.offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (watch) {
#ifdef CONFIG_GPIO_CDEV_V1
		if (lineinfo_ensure_abi_version(cdev, 2))
			return -EPERM;
#endif
		if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
			return -EBUSY;
	}

	gpio_desc_to_lineinfo(desc, &lineinfo, false);

	if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
		if (watch)
			clear_bit(lineinfo.offset, cdev->watched_lines);
		return -EFAULT;
	}

	return 0;
}
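/*
 * lineinfo_unwatch() - handle GPIO_GET_LINEINFO_UNWATCH_IOCTL, removing a
 * line from the set of watched lines
 */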
static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
{
	__u32 offset;

	if (copy_from_user(&offset, ip, sizeof(offset)))
		return -EFAULT;

	if (offset >= cdev->gdev->ngpio)
		return -EINVAL;

	if (!test_and_clear_bit(offset, cdev->watched_lines))
		return -EBUSY;

	return 0;
}
/*
 * gpio_ioctl() - ioctl handler for the GPIO chardev
 */
static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_device *gdev = cdev->gdev;
	void __user *ip = (void __user *)arg;

	guard(srcu)(&gdev->srcu);

	/* We fail any subsequent ioctl():s when the chip is gone */
	if (!rcu_access_pointer(gdev->chip))
		return -ENODEV;

	/* Fill in the struct and pass to userspace */
	switch (cmd) {
	case GPIO_GET_CHIPINFO_IOCTL:
		return chipinfo_get(cdev, ip);
#ifdef CONFIG_GPIO_CDEV_V1
	case GPIO_GET_LINEHANDLE_IOCTL:
		return linehandle_create(gdev, ip);
	case GPIO_GET_LINEEVENT_IOCTL:
		return lineevent_create(gdev, ip);
	case GPIO_GET_LINEINFO_IOCTL:
		return lineinfo_get_v1(cdev, ip, false);
	case GPIO_GET_LINEINFO_WATCH_IOCTL:
		return lineinfo_get_v1(cdev, ip, true);
#endif /* CONFIG_GPIO_CDEV_V1 */
	case GPIO_V2_GET_LINEINFO_IOCTL:
		return lineinfo_get(cdev, ip, false);
	case GPIO_V2_GET_LINEINFO_WATCH_IOCTL:
		return lineinfo_get(cdev, ip, true);
	case GPIO_V2_GET_LINE_IOCTL:
		return linereq_create(gdev, ip);
	case GPIO_GET_LINEINFO_UNWATCH_IOCTL:
		return lineinfo_unwatch(cdev, ip);
	default:
		return -EINVAL;
	}
}
#ifdef CONFIG_COMPAT
static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	return gpio_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
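/*
 * struct lineinfo_changed_ctx - context for deferring a line info change
 * event from atomic notifier context to the line state workqueue
 */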
struct lineinfo_changed_ctx {
	struct work_struct work;
	struct gpio_v2_line_info_changed chg;
	struct gpio_device *gdev;
	struct gpio_chardev_data *cdev;
};
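/*
 * lineinfo_changed_func() - workqueue handler that completes a line info
 * change event (including the sleeping pinctrl check) and queues it into the
 * chardev's event FIFO
 */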
static void lineinfo_changed_func(struct work_struct *work)
{
	struct lineinfo_changed_ctx *ctx =
		container_of(work, struct lineinfo_changed_ctx, work);
	struct gpio_chip *gc;
	int ret;

	if (!(ctx->chg.info.flags & GPIO_V2_LINE_FLAG_USED)) {
		/*
		 * If nobody set the USED flag earlier, let's see with pinctrl
		 * now. We're doing this late because it's a sleeping function.
		 * Pin functions are in general much more static and while it's
		 * not 100% bullet-proof, it's good enough for most cases.
		 */
		scoped_guard(srcu, &ctx->gdev->srcu) {
			gc = srcu_dereference(ctx->gdev->chip, &ctx->gdev->srcu);
			if (gc &&
			    !pinctrl_gpio_can_use_line(gc, ctx->chg.info.offset))
				ctx->chg.info.flags |= GPIO_V2_LINE_FLAG_USED;
		}
	}

	ret = kfifo_in_spinlocked(&ctx->cdev->events, &ctx->chg, 1,
				  &ctx->cdev->wait.lock);
	if (ret)
		wake_up_poll(&ctx->cdev->wait, EPOLLIN);
	else
		pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");

	gpio_device_put(ctx->gdev);
	fput(ctx->cdev->fp);
	kfree(ctx);
}
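/*
 * lineinfo_changed_notify() - atomic notifier callback invoked on line state
 * changes; snapshots the line info and defers the remaining (sleeping) work
 * to the line state workqueue
 */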
static int lineinfo_changed_notify(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct gpio_chardev_data *cdev =
		container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
	struct lineinfo_changed_ctx *ctx;
	struct gpio_desc *desc = data;

	if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
		return NOTIFY_DONE;

	/*
	 * If this is called from atomic context (for instance: with a spinlock
	 * taken by the atomic notifier chain), any sleeping calls must be done
	 * outside of this function in process context of the dedicated
	 * workqueue.
	 *
	 * Let's gather as much info as possible from the descriptor and
	 * postpone just the call to pinctrl_gpio_can_use_line() until the work
	 * is executed.
	 */

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx) {
		pr_err("Failed to allocate memory for line info notification\n");
		return NOTIFY_DONE;
	}

	ctx->chg.event_type = action;
	ctx->chg.timestamp_ns = ktime_get_ns();
	gpio_desc_to_lineinfo(desc, &ctx->chg.info, true);
	/* Keep the GPIO device alive until we emit the event. */
	ctx->gdev = gpio_device_get(desc->gdev);
	ctx->cdev = cdev;
	/* Keep the file descriptor alive too. */
	get_file(ctx->cdev->fp);

	INIT_WORK(&ctx->work, lineinfo_changed_func);
	queue_work(ctx->gdev->line_state_wq, &ctx->work);

	return NOTIFY_OK;
}
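/*
 * gpio_device_unregistered_notify() - wake up pollers when the backing GPIO
 * device is unregistered so they can observe the removal
 */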
static int gpio_device_unregistered_notify(struct notifier_block *nb,
					   unsigned long action, void *data)
{
	struct gpio_chardev_data *cdev = container_of(nb,
						      struct gpio_chardev_data,
						      device_unregistered_nb);

	wake_up_poll(&cdev->wait, EPOLLIN | EPOLLERR);

	return NOTIFY_OK;
}
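/*
 * lineinfo_watch_poll() - poll handler for line info change events
 */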
static __poll_t lineinfo_watch_poll(struct file *file,
				    struct poll_table_struct *pollt)
{
	struct gpio_chardev_data *cdev = file->private_data;
	__poll_t events = 0;

	guard(srcu)(&cdev->gdev->srcu);

	if (!rcu_access_pointer(cdev->gdev->chip))
		return EPOLLHUP | EPOLLERR;

	poll_wait(file, &cdev->wait, pollt);

	if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
						 &cdev->wait.lock))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}
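/*
 * lineinfo_watch_read() - read handler returning one or more line info change
 * events, sized according to the watch ABI version selected by the caller
 */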
static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
				   size_t count, loff_t *off)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_v2_line_info_changed event;
	ssize_t bytes_read = 0;
	int ret;
	size_t event_size;

	guard(srcu)(&cdev->gdev->srcu);

	if (!rcu_access_pointer(cdev->gdev->chip))
		return -ENODEV;

#ifndef CONFIG_GPIO_CDEV_V1
	event_size = sizeof(struct gpio_v2_line_info_changed);
	if (count < event_size)
		return -EINVAL;
#endif

	do {
		scoped_guard(spinlock, &cdev->wait.lock) {
			if (kfifo_is_empty(&cdev->events)) {
				if (bytes_read)
					return bytes_read;

				if (file->f_flags & O_NONBLOCK)
					return -EAGAIN;

				ret = wait_event_interruptible_locked(cdev->wait,
						!kfifo_is_empty(&cdev->events));
				if (ret)
					return ret;
			}
#ifdef CONFIG_GPIO_CDEV_V1
			/* must be after kfifo check so watch_abi_version is set */
			if (atomic_read(&cdev->watch_abi_version) == 2)
				event_size = sizeof(struct gpio_v2_line_info_changed);
			else
				event_size = sizeof(struct gpioline_info_changed);
			if (count < event_size)
				return -EINVAL;
#endif
			if (kfifo_out(&cdev->events, &event, 1) != 1) {
				/*
				 * This should never happen - we hold the
				 * lock from the moment we learned the fifo
				 * is no longer empty until now.
				 */
				WARN(1, "failed to read from non-empty kfifo");
				return -EIO;
			}
		}

#ifdef CONFIG_GPIO_CDEV_V1
		if (event_size == sizeof(struct gpio_v2_line_info_changed)) {
			if (copy_to_user(buf + bytes_read, &event, event_size))
				return -EFAULT;
		} else {
			struct gpioline_info_changed event_v1;

			gpio_v2_line_info_changed_to_v1(&event, &event_v1);
			if (copy_to_user(buf + bytes_read, &event_v1,
					 event_size))
				return -EFAULT;
		}
#else
		if (copy_to_user(buf + bytes_read, &event, event_size))
			return -EFAULT;
#endif
		bytes_read += event_size;
	} while (count >= bytes_read + sizeof(event));

	return bytes_read;
}
/**
 * gpio_chrdev_open() - open the chardev for ioctl operations
 * @inode: inode for this chardev
 * @file: file struct for storing private data
 *
 * Returns:
 * 0 on success, or negative errno on failure.
 */
static int gpio_chrdev_open(struct inode *inode, struct file *file)
{
	struct gpio_device *gdev = container_of(inode->i_cdev,
						struct gpio_device, chrdev);
	struct gpio_chardev_data *cdev;
	int ret = -ENOMEM;

	guard(srcu)(&gdev->srcu);

	/* Fail on open if the backing gpiochip is gone */
	if (!rcu_access_pointer(gdev->chip))
		return -ENODEV;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return -ENOMEM;

	cdev->watched_lines = bitmap_zalloc(gdev->ngpio, GFP_KERNEL);
	if (!cdev->watched_lines)
		goto out_free_cdev;

	init_waitqueue_head(&cdev->wait);
	INIT_KFIFO(cdev->events);
	cdev->gdev = gpio_device_get(gdev);

	cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
	ret = atomic_notifier_chain_register(&gdev->line_state_notifier,
					     &cdev->lineinfo_changed_nb);
	if (ret)
		goto out_free_bitmap;

	cdev->device_unregistered_nb.notifier_call =
					gpio_device_unregistered_notify;
	ret = blocking_notifier_chain_register(&gdev->device_notifier,
					       &cdev->device_unregistered_nb);
	if (ret)
		goto out_unregister_line_notifier;

	file->private_data = cdev;
	cdev->fp = file;

	ret = nonseekable_open(inode, file);
	if (ret)
		goto out_unregister_device_notifier;

	return ret;

out_unregister_device_notifier:
	blocking_notifier_chain_unregister(&gdev->device_notifier,
					   &cdev->device_unregistered_nb);
out_unregister_line_notifier:
	atomic_notifier_chain_unregister(&gdev->line_state_notifier,
					 &cdev->lineinfo_changed_nb);
out_free_bitmap:
	gpio_device_put(gdev);
	bitmap_free(cdev->watched_lines);
out_free_cdev:
	kfree(cdev);

	return ret;
}
/**
 * gpio_chrdev_release() - close chardev after ioctl operations
 * @inode: inode for this chardev
 * @file: file struct for storing private data
 *
 * Returns:
 * 0 on success, or negative errno on failure.
 */
static int gpio_chrdev_release(struct inode *inode, struct file *file)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_device *gdev = cdev->gdev;

	blocking_notifier_chain_unregister(&gdev->device_notifier,
					   &cdev->device_unregistered_nb);
	atomic_notifier_chain_unregister(&gdev->line_state_notifier,
					 &cdev->lineinfo_changed_nb);
	bitmap_free(cdev->watched_lines);
	gpio_device_put(gdev);
	kfree(cdev);

	return 0;
}
static const struct file_operations gpio_fileops = {
	.release = gpio_chrdev_release,
	.open = gpio_chrdev_open,
	.poll = lineinfo_watch_poll,
	.read = lineinfo_watch_read,
	.owner = THIS_MODULE,
	.unlocked_ioctl = gpio_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = gpio_ioctl_compat,
#endif
};
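/**
 * gpiolib_cdev_register() - add the chardev and line state workqueue for a
 * GPIO device
 * @gdev: the GPIO device to register the chardev for
 * @devt: the base dev_t for the gpiochip character devices
 *
 * Returns:
 * 0 on success, or negative errno on failure.
 */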
int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
{
	struct gpio_chip *gc;
	int ret;

	cdev_init(&gdev->chrdev, &gpio_fileops);
	gdev->chrdev.owner = THIS_MODULE;
	gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);

	gdev->line_state_wq = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
						      dev_name(&gdev->dev));
	if (!gdev->line_state_wq)
		return -ENOMEM;

	ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
	if (ret)
		return ret;

	guard(srcu)(&gdev->srcu);
	gc = srcu_dereference(gdev->chip, &gdev->srcu);
	if (!gc)
		return -ENODEV;

	chip_dbg(gc, "added GPIO chardev (%d:%d)\n", MAJOR(devt), gdev->id);

	return 0;
}
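/**
 * gpiolib_cdev_unregister() - remove the chardev for a GPIO device and notify
 * userspace that the device is being unregistered
 * @gdev: the GPIO device to remove the chardev for
 */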
void gpiolib_cdev_unregister(struct gpio_device *gdev)
{
	destroy_workqueue(gdev->line_state_wq);
	cdev_device_del(&gdev->chrdev, &gdev->dev);
	blocking_notifier_call_chain(&gdev->device_notifier, 0, NULL);
}