]>
Commit | Line | Data |
---|---|---|
c3b5b024 SA |
1 | /* |
2 | * v4l2-event.c | |
3 | * | |
4 | * V4L2 events. | |
5 | * | |
6 | * Copyright (C) 2009--2010 Nokia Corporation. | |
7 | * | |
8 | * Contact: Sakari Ailus <[email protected]> | |
9 | * | |
10 | * This program is free software; you can redistribute it and/or | |
11 | * modify it under the terms of the GNU General Public License | |
12 | * version 2 as published by the Free Software Foundation. | |
13 | * | |
14 | * This program is distributed in the hope that it will be useful, but | |
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
17 | * General Public License for more details. | |
18 | * | |
19 | * You should have received a copy of the GNU General Public License | |
20 | * along with this program; if not, write to the Free Software | |
21 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | |
22 | * 02110-1301 USA | |
23 | */ | |
24 | ||
25 | #include <media/v4l2-dev.h> | |
26 | #include <media/v4l2-fh.h> | |
27 | #include <media/v4l2-event.h> | |
6e239399 | 28 | #include <media/v4l2-ctrls.h> |
c3b5b024 SA |
29 | |
30 | #include <linux/sched.h> | |
31 | #include <linux/slab.h> | |
32 | ||
f1e393de | 33 | static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx) |
c3b5b024 | 34 | { |
f1e393de HV |
35 | idx += sev->first; |
36 | return idx >= sev->elems ? idx - sev->elems : idx; | |
c3b5b024 | 37 | } |
c3b5b024 SA |
38 | |
/*
 * Pop the oldest pending event off fh's available list into *event.
 * Returns 0 on success or -ENOENT if nothing is pending.
 * Takes fh->vdev->fh_lock internally.
 */
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	/* navailable must track the list length; zero here is a bug. */
	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	/* Tell userspace how many more events remain after this one. */
	kev->event.pending = fh->navailable;
	*event = kev->event;
	/* Free the ring slot: advance 'first' and shrink 'in_use'. */
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}
66 | ||
/*
 * Dequeue one event for fh. In nonblocking mode this returns -ENOENT
 * immediately when nothing is pending; otherwise it sleeps interruptibly
 * until an event arrives. The serialization mutex fh->vdev->lock, when
 * present, is released for the duration of the wait so other file
 * operations can make progress.
 */
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		/*
		 * -ENOENT means another dequeuer raced us to the event;
		 * loop and wait again.
		 */
		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
c3b5b024 | 94 | |
6e239399 | 95 | /* Caller must hold fh->vdev->fh_lock! */ |
c3b5b024 | 96 | static struct v4l2_subscribed_event *v4l2_event_subscribed( |
6e239399 | 97 | struct v4l2_fh *fh, u32 type, u32 id) |
c3b5b024 | 98 | { |
c3b5b024 SA |
99 | struct v4l2_subscribed_event *sev; |
100 | ||
f3cd385a | 101 | assert_spin_locked(&fh->vdev->fh_lock); |
c3b5b024 | 102 | |
3f66f0ed | 103 | list_for_each_entry(sev, &fh->subscribed, list) |
6e239399 | 104 | if (sev->type == type && sev->id == id) |
c3b5b024 | 105 | return sev; |
c3b5b024 SA |
106 | |
107 | return NULL; | |
108 | } | |
109 | ||
/*
 * Queue event ev on a single file handle with timestamp *ts. Does
 * nothing if the handle is not subscribed to ev's type/id.
 * Caller must hold fh->vdev->fh_lock (all callers in this file do).
 */
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->replace) {
				/*
				 * Single-slot ring: fold the new event into
				 * the dropped one (its slot is reused below
				 * with copy_payload = false) so no change
				 * information is lost.
				 */
				sev->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->merge) {
			/*
			 * Merge the dropped oldest event into the new
			 * second-oldest so its changes are not lost.
			 */
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}
160 | ||
c3b5b024 SA |
161 | void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev) |
162 | { | |
163 | struct v4l2_fh *fh; | |
164 | unsigned long flags; | |
165 | struct timespec timestamp; | |
166 | ||
167 | ktime_get_ts(×tamp); | |
168 | ||
169 | spin_lock_irqsave(&vdev->fh_lock, flags); | |
170 | ||
3f66f0ed | 171 | list_for_each_entry(fh, &vdev->fh_list, list) |
6e239399 | 172 | __v4l2_event_queue_fh(fh, ev, ×tamp); |
c3b5b024 SA |
173 | |
174 | spin_unlock_irqrestore(&vdev->fh_lock, flags); | |
175 | } | |
176 | EXPORT_SYMBOL_GPL(v4l2_event_queue); | |
177 | ||
6e239399 HV |
178 | void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev) |
179 | { | |
180 | unsigned long flags; | |
181 | struct timespec timestamp; | |
182 | ||
183 | ktime_get_ts(×tamp); | |
184 | ||
185 | spin_lock_irqsave(&fh->vdev->fh_lock, flags); | |
186 | __v4l2_event_queue_fh(fh, ev, ×tamp); | |
187 | spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); | |
188 | } | |
189 | EXPORT_SYMBOL_GPL(v4l2_event_queue_fh); | |
190 | ||
/* Return the number of events ready for dequeuing on this file handle. */
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
196 | ||
2151bdc8 HV |
197 | static void ctrls_replace(struct v4l2_event *old, const struct v4l2_event *new) |
198 | { | |
199 | u32 old_changes = old->u.ctrl.changes; | |
200 | ||
201 | old->u.ctrl = new->u.ctrl; | |
202 | old->u.ctrl.changes |= old_changes; | |
203 | } | |
204 | ||
/*
 * Merge callback for control events: fold the dropped (old) event's
 * 'changes' mask into the event that stays queued.
 */
static void ctrls_merge(const struct v4l2_event *old, struct v4l2_event *new)
{
	new->u.ctrl.changes |= old->u.ctrl.changes;
}
209 | ||
c3b5b024 | 210 | int v4l2_event_subscribe(struct v4l2_fh *fh, |
f1e393de | 211 | struct v4l2_event_subscription *sub, unsigned elems) |
c3b5b024 | 212 | { |
6e239399 HV |
213 | struct v4l2_subscribed_event *sev, *found_ev; |
214 | struct v4l2_ctrl *ctrl = NULL; | |
c3b5b024 | 215 | unsigned long flags; |
f1e393de | 216 | unsigned i; |
c3b5b024 | 217 | |
f1e393de HV |
218 | if (elems < 1) |
219 | elems = 1; | |
6e239399 HV |
220 | if (sub->type == V4L2_EVENT_CTRL) { |
221 | ctrl = v4l2_ctrl_find(fh->ctrl_handler, sub->id); | |
222 | if (ctrl == NULL) | |
223 | return -EINVAL; | |
224 | } | |
225 | ||
f1e393de | 226 | sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL); |
c3b5b024 SA |
227 | if (!sev) |
228 | return -ENOMEM; | |
f1e393de HV |
229 | for (i = 0; i < elems; i++) |
230 | sev->events[i].sev = sev; | |
231 | sev->type = sub->type; | |
232 | sev->id = sub->id; | |
233 | sev->flags = sub->flags; | |
234 | sev->fh = fh; | |
235 | sev->elems = elems; | |
2151bdc8 HV |
236 | if (ctrl) { |
237 | sev->replace = ctrls_replace; | |
238 | sev->merge = ctrls_merge; | |
239 | } | |
c3b5b024 SA |
240 | |
241 | spin_lock_irqsave(&fh->vdev->fh_lock, flags); | |
6e239399 | 242 | found_ev = v4l2_event_subscribed(fh, sub->type, sub->id); |
f1e393de | 243 | if (!found_ev) |
523f46d6 | 244 | list_add(&sev->list, &fh->subscribed); |
c3b5b024 SA |
245 | spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); |
246 | ||
f1e393de | 247 | /* v4l2_ctrl_add_event uses a mutex, so do this outside the spin lock */ |
77068d36 HV |
248 | if (found_ev) |
249 | kfree(sev); | |
250 | else if (ctrl) | |
251 | v4l2_ctrl_add_event(ctrl, sev); | |
c3b5b024 SA |
252 | |
253 | return 0; | |
254 | } | |
255 | EXPORT_SYMBOL_GPL(v4l2_event_subscribe); | |
256 | ||
f1e393de | 257 | void v4l2_event_unsubscribe_all(struct v4l2_fh *fh) |
c3b5b024 | 258 | { |
6e239399 | 259 | struct v4l2_event_subscription sub; |
c3b5b024 SA |
260 | struct v4l2_subscribed_event *sev; |
261 | unsigned long flags; | |
262 | ||
263 | do { | |
264 | sev = NULL; | |
265 | ||
266 | spin_lock_irqsave(&fh->vdev->fh_lock, flags); | |
523f46d6 HV |
267 | if (!list_empty(&fh->subscribed)) { |
268 | sev = list_first_entry(&fh->subscribed, | |
6e239399 HV |
269 | struct v4l2_subscribed_event, list); |
270 | sub.type = sev->type; | |
271 | sub.id = sev->id; | |
c3b5b024 SA |
272 | } |
273 | spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); | |
6e239399 HV |
274 | if (sev) |
275 | v4l2_event_unsubscribe(fh, &sub); | |
c3b5b024 SA |
276 | } while (sev); |
277 | } | |
f1e393de | 278 | EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all); |
c3b5b024 SA |
279 | |
/*
 * Unsubscribe fh from sub->type/sub->id events; V4L2_EVENT_ALL drops
 * every subscription. Always returns 0 (unsubscribing something that
 * was never subscribed is a no-op).
 */
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL) {
		list_del(&sev->list);
		/*
		 * NOTE(review): clearing sev->fh presumably signals other
		 * code holding a sev pointer that the subscription is going
		 * away — confirm against v4l2_ctrl event delivery.
		 */
		sev->fh = NULL;
	}

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
	/*
	 * Control events must also be detached from the control itself;
	 * done outside the spinlock because v4l2_ctrl_del_event takes a
	 * mutex (cf. the matching comment in v4l2_event_subscribe).
	 */
	if (sev && sev->type == V4L2_EVENT_CTRL) {
		struct v4l2_ctrl *ctrl = v4l2_ctrl_find(fh->ctrl_handler, sev->id);

		if (ctrl)
			v4l2_ctrl_del_event(ctrl, sev);
	}

	/* kfree(NULL) is a no-op, so no guard is needed here. */
	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);