/*
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/mm.h>		/* kvzalloc()/kvfree() used below */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

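/*
 * Each subscription keeps its pending events in a ring buffer of
 * sev->elems entries: sev->first is the index of the oldest queued
 * event and sev->in_use the number of queued events.  sev_pos()
 * converts an offset from the oldest event into an absolute index
 * into sev->events[].
 */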
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}

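/*
 * Pop the oldest available event on the file handle into *event, or
 * return -ENOENT when nothing is queued.  All bookkeeping is done
 * under fh->vdev->fh_lock.
 */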
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}
	WARN_ON(fh->navailable == 0);
	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;
	kev->event.pending = fh->navailable;
	*event = kev->event;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
	return 0;
}

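/*
 * Dequeue with optional blocking: non-blocking callers get whatever is
 * queued right now, blocking callers sleep on fh->wait until an event
 * arrives.  The (optional) vdev->lock is released across the sleep so
 * other ioctls on the device are not held up while we wait.
 */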
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);
	do {
		ret = wait_event_interruptible(fh->wait, fh->navailable != 0);
		if (ret < 0)
			break;
		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);
	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}

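/*
 * Queue one event on a single file handle.  When the subscription's
 * ring buffer is already full, the oldest event is dropped to make
 * room; the optional replace/merge ops give the subscription a chance
 * to fold the dropped payload into the event that is kept.
 */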
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);
	fh->navailable++;

	wake_up_all(&fh->wait);
}

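/* Broadcast an event to every file handle open on the video device. */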
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	if (vdev == NULL)
		return;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);
	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, &timestamp);
	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);

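/* Queue an event on one specific file handle only. */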
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, &timestamp);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

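/*
 * Detach a subscription from its file handle: drop any of its events
 * still sitting on fh->available and unlink it from fh->subscribed.
 * Caller must hold both fh->subscribe_lock and fh->vdev->fh_lock.
 */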
static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
{
	struct v4l2_fh *fh = sev->fh;
	unsigned int i;

	lockdep_assert_held(&fh->subscribe_lock);
	assert_spin_locked(&fh->vdev->fh_lock);

	/* Remove any pending events for this subscription */
	for (i = 0; i < sev->in_use; i++) {
		list_del(&sev->events[sev_pos(sev, i)].list);
		fh->navailable--;
	}
	list_del(&sev->list);
}

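/*
 * Subscribe to a (type, id) event pair with room for @elems queued
 * events.  Subscribing a second time to the same pair is a successful
 * no-op; otherwise the optional ops->add() callback may reject the
 * subscription, in which case it is unwound and its error is returned.
 */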
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;
	int ret = 0;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;
	if (elems < 1)
		elems = 1;

	sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;
	sev->elems = elems;

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		/* Already listening */
		kvfree(sev);
	} else if (sev->ops && sev->ops->add) {
		ret = sev->ops->add(sev, elems);
		if (ret) {
			spin_lock_irqsave(&fh->vdev->fh_lock, flags);
			__v4l2_event_unsubscribe(sev);
			spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
			kvfree(sev);
		}
	}

	mutex_unlock(&fh->subscribe_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

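/* Drop every remaining subscription on the file handle, one at a time. */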
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

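/*
 * Unsubscribe from a single (type, id) pair, or from everything when
 * sub->type is V4L2_EVENT_ALL.  Pending events of the subscription are
 * discarded and ops->del() is called before the subscription is freed.
 */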
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL)
		__v4l2_event_unsubscribe(sev);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	mutex_unlock(&fh->subscribe_lock);

	kvfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);

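/*
 * V4L2_EVENT_SOURCE_CHANGE payloads carry a bitmask of changes, so when
 * a pending event has to be replaced or merged the bitmasks are OR'ed
 * together: a consumer that misses intermediate events still sees every
 * change flag that was ever raised.
 */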
static void v4l2_event_src_replace(struct v4l2_event *old,
				   const struct v4l2_event *new)
{
	u32 old_changes = old->u.src_change.changes;

	old->u.src_change = new->u.src_change;
	old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
				 struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};

int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
				    const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);

int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
		struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);
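
/*
 * Typical driver wiring (a sketch under assumptions, not part of this
 * file): VIDIOC_(UN)SUBSCRIBE_EVENT handlers usually dispatch straight
 * to the helpers above, e.g.:
 *
 *	static int my_subscribe_event(struct v4l2_fh *fh,
 *				      const struct v4l2_event_subscription *sub)
 *	{
 *		if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
 *			return v4l2_src_change_event_subscribe(fh, sub);
 *		return v4l2_event_subscribe(fh, sub, 4, NULL);
 *	}
 *
 * with my_subscribe_event() and v4l2_event_unsubscribe() plugged into
 * the driver's struct v4l2_ioctl_ops.  The function name and the queue
 * depth of 4 are illustrative only.
 */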