// SPDX-License-Identifier: GPL-2.0-only
/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
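
/*
 * Each subscription keeps its events in a fixed-size ring buffer:
 * sev->first indexes the oldest queued event and sev->in_use counts the
 * filled slots. sev_pos() turns an offset relative to 'first' into an
 * absolute index into the sev->events[] array.
 */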
static unsigned int sev_pos(const struct v4l2_subscribed_event *sev, unsigned int idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}
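
/* Detach the oldest available event from 'fh' and copy it to 'event'. */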
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	struct timespec64 ts;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	ts = ns_to_timespec64(kev->ts);
	event->timestamp.tv_sec = ts.tv_sec;
	event->timestamp.tv_nsec = ts.tv_nsec;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}

int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
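
/*
 * Sketch (not part of this file): the usual caller is the VIDIOC_DQEVENT
 * handler in v4l2-ioctl.c, which passes the file's non-blocking flag
 * straight through, roughly:
 *
 *	v4l2_event_dequeue(fh, arg, file->f_flags & O_NONBLOCK);
 */
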
/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}
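
/*
 * Queue one event instance on a single filehandle. Called with
 * fh->vdev->fh_lock held; the event is dropped silently if this
 * filehandle is not subscribed to it.
 */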
static void __v4l2_event_queue_fh(struct v4l2_fh *fh,
				  const struct v4l2_event *ev, u64 ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->ts = ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}
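
/*
 * Broadcast an event to every filehandle open on the device. The
 * timestamp is sampled once up front so all subscribers see the same
 * value.
 */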
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	u64 ts;

	if (vdev == NULL)
		return;

	ts = ktime_get_ns();

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, ts);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);
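
/*
 * Minimal usage sketch (hypothetical driver code, not part of this
 * file): signalling a resolution change to every subscriber:
 *
 *	static const struct v4l2_event ev = {
 *		.type = V4L2_EVENT_SOURCE_CHANGE,
 *		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
 *	};
 *
 *	v4l2_event_queue(vdev, &ev);
 */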

void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	u64 ts = ktime_get_ns();

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, ts);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
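
/*
 * Wake every filehandle waiting on this device, for instance when the
 * device is going away. No event is queued; blocked waiters simply
 * re-evaluate their wait condition.
 */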
void v4l2_event_wake_all(struct video_device *vdev)
{
	struct v4l2_fh *fh;
	unsigned long flags;

	if (!vdev)
		return;

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		wake_up_all(&fh->wait);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_wake_all);
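
/*
 * Unlink a subscription and its pending events. Shared by the
 * unsubscribe path and the error path of v4l2_event_subscribe(); the
 * caller must hold both locks asserted below.
 */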
static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
{
	struct v4l2_fh *fh = sev->fh;
	unsigned int i;

	lockdep_assert_held(&fh->subscribe_lock);
	assert_spin_locked(&fh->vdev->fh_lock);

	/* Remove any pending events for this subscription */
	for (i = 0; i < sev->in_use; i++) {
		list_del(&sev->events[sev_pos(sev, i)].list);
		fh->navailable--;
	}
	list_del(&sev->list);
}

int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned int elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned int i;
	int ret = 0;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;
	sev->elems = elems;

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		/* Already listening */
		kvfree(sev);
	} else if (sev->ops && sev->ops->add) {
		ret = sev->ops->add(sev, elems);
		if (ret) {
			spin_lock_irqsave(&fh->vdev->fh_lock, flags);
			__v4l2_event_unsubscribe(sev);
			spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
			kvfree(sev);
		}
	}

	mutex_unlock(&fh->subscribe_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
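
/*
 * Typical use (sketch, hypothetical driver code): a driver's
 * vidioc_subscribe_event handler dispatches on the event type:
 *
 *	switch (sub->type) {
 *	case V4L2_EVENT_EOS:
 *		return v4l2_event_subscribe(fh, sub, 0, NULL);
 *	case V4L2_EVENT_CTRL:
 *		return v4l2_ctrl_subscribe_event(fh, sub);
 *	default:
 *		return -EINVAL;
 *	}
 */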

void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL)
		__v4l2_event_unsubscribe(sev);

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	mutex_unlock(&fh->subscribe_lock);

	kvfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);
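
/*
 * Source-change events carry a 'changes' bitmask. On queue overflow,
 * replace() folds the old bitmask into the newest payload (single-slot
 * queues) and merge() folds the dropped oldest event into the second
 * oldest, so no change flags are lost.
 */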
static void v4l2_event_src_replace(struct v4l2_event *old,
				   const struct v4l2_event *new)
{
	u32 old_changes = old->u.src_change.changes;

	old->u.src_change = new->u.src_change;
	old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
				 struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};

int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
				    const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);

int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
					   struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);