/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

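/*
 * Overview: drivers typically subscribe a file handle from their
 * VIDIOC_SUBSCRIBE_EVENT handler via v4l2_event_subscribe(), raise
 * events with v4l2_event_queue() or v4l2_event_queue_fh(), and
 * userspace collects them with VIDIOC_DQEVENT, serviced here by
 * v4l2_event_dequeue().
 */

/*
 * Map an index relative to the subscription onto its ring of pending
 * events: sev->events holds sev->elems slots, oldest at sev->first.
 */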
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}

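/*
 * Pull the oldest available event off fh into *event, non-blocking.
 * Returns -ENOENT when nothing is queued.
 */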
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}

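/*
 * Dequeue one event, sleeping if nonblocking is zero. The video_device
 * serialization lock, when present, is dropped across the wait so other
 * file operations can make progress.
 */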
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}

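/*
 * Deliver an event to one file handle; fh->vdev->fh_lock must be held.
 * When the subscription's ring is full, the oldest event is dropped, and
 * subscriptions with replace/merge ops get a chance to fold its payload
 * into the remaining events so the information is not lost.
 */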
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}

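/*
 * Broadcast an event to every file handle open on vdev. The timestamp
 * is taken once so all subscribers see the same value.
 */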
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	if (vdev == NULL)
		return;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, &timestamp);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);

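/* Like v4l2_event_queue(), but targeting a single file handle. */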
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, &timestamp);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

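/*
 * Number of events ready to dequeue; drivers typically consult this from
 * their poll() handler.
 */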
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

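/*
 * Detach a subscription and its pending events from the file handle.
 * Both fh->subscribe_lock and fh->vdev->fh_lock must be held, as the
 * assertions below document.
 */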
static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
{
	struct v4l2_fh *fh = sev->fh;
	unsigned int i;

	lockdep_assert_held(&fh->subscribe_lock);
	assert_spin_locked(&fh->vdev->fh_lock);

	/* Remove any pending events for this subscription */
	for (i = 0; i < sev->in_use; i++) {
		list_del(&sev->events[sev_pos(sev, i)].list);
		fh->navailable--;
	}
	list_del(&sev->list);
}

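/*
 * Subscribe fh to events of the given type/id, with room for @elems
 * queued events per subscription (at least one). Subscribing again to an
 * already-subscribed event is a successful no-op.
 */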
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;
	int ret = 0;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;
	sev->elems = elems;

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		/* Already listening */
		kvfree(sev);
	} else if (sev->ops && sev->ops->add) {
		ret = sev->ops->add(sev, elems);
		if (ret) {
			spin_lock_irqsave(&fh->vdev->fh_lock, flags);
			__v4l2_event_unsubscribe(sev);
			spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
			kvfree(sev);
		}
	}

	mutex_unlock(&fh->subscribe_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

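/*
 * Drop all subscriptions on fh one by one. Each iteration re-takes the
 * spinlock to pick the next entry, since v4l2_event_unsubscribe() takes
 * a mutex and must not be called with the spinlock held.
 */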
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

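/*
 * Unsubscribe from one event type/id, or from everything when sub->type
 * is V4L2_EVENT_ALL.
 */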
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL)
		__v4l2_event_unsubscribe(sev);

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	mutex_unlock(&fh->subscribe_lock);

	kvfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

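/* Subdev-flavoured wrapper; the sd argument is unused. */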
int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);

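/*
 * V4L2_EVENT_SOURCE_CHANGE carries a bitmask of changes, so events can
 * be coalesced instead of lost: replace keeps the newest payload while
 * ORing in the overwritten event's changes, and merge ORs the dropped
 * event's changes into the next oldest one.
 */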
static void v4l2_event_src_replace(struct v4l2_event *old,
				   const struct v4l2_event *new)
{
	u32 old_changes = old->u.src_change.changes;

	old->u.src_change = new->u.src_change;
	old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
				 struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};

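/*
 * Subscribe to V4L2_EVENT_SOURCE_CHANGE using the coalescing ops above.
 * Any other event type is rejected with -EINVAL.
 */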
int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
				    const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);

int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
		struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);