/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <[email protected]>
 * Marek Szyprowski, <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <[email protected]>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)

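/*
 * Illustration (a sketch, not part of this file): a CAPTURE buffer whose
 * vb2 mmap offset is 0x1000 is reported to userspace by
 * v4l2_m2m_querybuf() below as 0x40001000, and v4l2_m2m_mmap() subtracts
 * the base again to dispatch the request to the right queue:
 *
 *	buf.m.offset += DST_QUEUE_OFF_BASE;	// done in querybuf
 *	...
 *	vma->vm_pgoff -= DST_QUEUE_OFF_BASE >> PAGE_SHIFT;  // done in mmap
 */
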
enum v4l2_m2m_entity_type {
	MEM2MEM_ENT_TYPE_SOURCE,
	MEM2MEM_ENT_TYPE_SINK,
	MEM2MEM_ENT_TYPE_PROC
};

static const char * const m2m_entity_name[] = {
	"source",
	"sink",
	"proc"
};

/**
 * struct v4l2_m2m_dev - per-device context
 * @source:		&struct media_entity pointer with the source entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @source_pad:		&struct media_pad with the source pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink:		&struct media_entity with the sink entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink_pad:		&struct media_pad with the sink pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @proc:		&struct media_entity with the M2M device itself.
 * @proc_pads:		&struct media_pad with the @proc pads.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @intf_devnode:	&struct media_intf_devnode pointer with the interface
 *			that controls the M2M device.
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
	struct media_entity	*source;
	struct media_pad	source_pad;
	struct media_entity	sink;
	struct media_pad	sink_pad;
	struct media_entity	proc;
	struct media_pad	proc_pads[2];
	struct media_intf_devnode *intf_devnode;
#endif

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);

void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
	struct v4l2_m2m_buffer *b, *tmp;
	struct vb2_v4l2_buffer *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
		if (b->vb.vb2_buf.index == idx) {
			list_del(&b->list);
			q_ctx->num_rdy--;
			ret = &b->vb;
			break;
		}
	}
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);

/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/*
 * __v4l2_m2m_try_queue() - queue a job
 * @m2m_dev: m2m device
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job.
 *
 * This function can run in interrupt context.
 */
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags_job, flags_out, flags_cap;

	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Aborted context\n");
		return;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
	    && !m2m_ctx->out_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
	    && !m2m_ctx->cap_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
					flags_cap);
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

	if (m2m_dev->m2m_ops->job_ready
	    && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}

/**
 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job. If suitable,
 * run the next queued job on the mem2mem device.
 *
 * This function shouldn't run in interrupt context.
 *
 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
 * and then run another job for another context.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;

	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context:
 * 1] If the context is currently running, then job_abort will be called
 *    and we wait for the job to complete.
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue.
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		if (m2m_dev->m2m_ops->job_abort)
			m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	v4l2_m2m_try_schedule(m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);

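/*
 * Example (an illustrative sketch, not part of this file): a driver
 * typically calls v4l2_m2m_job_finish() from its interrupt handler once
 * the hardware is done with the current transaction. The my_dev/my_ctx
 * types and the handler name below are hypothetical.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *		vb2_buffer_done(&src->vb2_buf, VB2_BUF_STATE_DONE);
 *		vb2_buffer_done(&dst->vb2_buf, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */
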
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	ret = vb2_reqbufs(vq, reqbufs);
	/* If count == 0, then the owner has released all buffers and is
	   no longer the owner of the queue. Otherwise we have an owner. */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	if (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
	    (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
		dprintk("%s: requests cannot be used with capture buffers\n",
			__func__);
		return -EPERM;
	}
	ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
	if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	__poll_t req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	__poll_t rc = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = EPOLLPRI;
		else if (req_events & EPOLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
	    && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= EPOLLERR;
		goto end;
	}

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (list_empty(&src_q->done_list))
		poll_wait(file, &src_q->done_wq, wait);
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (list_empty(&dst_q->done_list)) {
		/*
		 * If the last buffer was dequeued from the capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (dst_q->last_buffer_dequeued) {
			spin_unlock_irqrestore(&dst_q->done_lock, flags);
			return rc | EPOLLIN | EPOLLRDNORM;
		}

		poll_wait(file, &dst_q->done_wq, wait);
	}
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
						done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
			|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
						done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
			|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
	media_devnode_remove(m2m_dev->intf_devnode);

	media_entity_remove_links(m2m_dev->source);
	media_entity_remove_links(&m2m_dev->sink);
	media_entity_remove_links(&m2m_dev->proc);
	media_device_unregister_entity(m2m_dev->source);
	media_device_unregister_entity(&m2m_dev->sink);
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->source->name);
	kfree(m2m_dev->sink.name);
	kfree(m2m_dev->proc.name);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);

static int v4l2_m2m_register_entity(struct media_device *mdev,
	struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
	struct video_device *vdev, int function)
{
	struct media_entity *entity;
	struct media_pad *pads;
	char *name;
	unsigned int len;
	int num_pads;
	int ret;

	switch (type) {
	case MEM2MEM_ENT_TYPE_SOURCE:
		entity = m2m_dev->source;
		pads = &m2m_dev->source_pad;
		pads[0].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_SINK:
		entity = &m2m_dev->sink;
		pads = &m2m_dev->sink_pad;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_PROC:
		entity = &m2m_dev->proc;
		pads = m2m_dev->proc_pads;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		pads[1].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 2;
		break;
	default:
		return -EINVAL;
	}

	entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
	if (type != MEM2MEM_ENT_TYPE_PROC) {
		entity->info.dev.major = VIDEO_MAJOR;
		entity->info.dev.minor = vdev->minor;
	}
	len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
	name = kmalloc(len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
	entity->name = name;
	entity->function = function;

	ret = media_entity_pads_init(entity, num_pads, pads);
	if (ret)
		return ret;
	ret = media_device_register_entity(mdev, entity);
	if (ret)
		return ret;

	return 0;
}

int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{
	struct media_device *mdev = vdev->v4l2_dev->mdev;
	struct media_link *link;
	int ret;

	if (!mdev)
		return 0;

	/* A memory-to-memory device consists of two
	 * DMA engine entities and one video processing entity.
	 * The DMA engine entities are linked to a V4L interface.
	 */

	/* Create the three entities with their pads */
	m2m_dev->source = &vdev->entity;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		return ret;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_PROC, vdev, function);
	if (ret)
		goto err_rel_entity0;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		goto err_rel_entity1;

	/* Connect the three entities */
	ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 1,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rel_entity2;

	ret = media_create_pad_link(&m2m_dev->proc, 0, &m2m_dev->sink, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rm_links0;

	/* Create video interface */
	m2m_dev->intf_devnode = media_devnode_create(mdev,
			MEDIA_INTF_T_V4L_VIDEO, 0,
			VIDEO_MAJOR, vdev->minor);
	if (!m2m_dev->intf_devnode) {
		ret = -ENOMEM;
		goto err_rm_links1;
	}

	/* Connect the two DMA engines to the interface */
	link = media_create_intf_link(m2m_dev->source,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}

	link = media_create_intf_link(&m2m_dev->sink,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_intf_link;
	}
	return 0;

err_rm_intf_link:
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
err_rm_devnode:
	media_devnode_remove(m2m_dev->intf_devnode);
err_rm_links1:
	media_entity_remove_links(&m2m_dev->sink);
err_rm_links0:
	media_entity_remove_links(&m2m_dev->proc);
	media_entity_remove_links(m2m_dev->source);
err_rel_entity2:
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->proc.name);
err_rel_entity1:
	media_device_unregister_entity(&m2m_dev->sink);
	kfree(m2m_dev->sink.name);
err_rel_entity0:
	media_device_unregister_entity(m2m_dev->source);
	kfree(m2m_dev->source->name);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
#endif

struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);

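/*
 * Example (an illustrative sketch, not part of this file): a driver's
 * probe() provides its scheduling callbacks through a v4l2_m2m_ops table
 * and keeps the returned handle. The my_* names are hypothetical.
 *
 *	static const struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,	// start one job
 *		.job_ready	= my_job_ready,		// optional
 *		.job_abort	= my_job_abort,		// optional
 *	};
 *
 *	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 */
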
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);

	if (ret)
		goto err;
	/*
	 * If both queues use same mutex assign it as the common buffer
	 * queues lock to the m2m context. This lock is used in the
	 * v4l2_m2m_ioctl_* helpers.
	 */
	if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
		m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);

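/*
 * Example (an illustrative sketch, not part of this file): a driver's
 * open() creates a per-file context and supplies a queue_init callback
 * that configures both vb2 queues. The my_* names are hypothetical.
 *
 *	static int my_queue_init(void *priv, struct vb2_queue *src_vq,
 *				 struct vb2_queue *dst_vq)
 *	{
 *		struct my_ctx *ctx = priv;
 *		int ret;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
 *		src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		src_vq->drv_priv = ctx;
 *		src_vq->ops = &my_qops;
 *		src_vq->mem_ops = &vb2_dma_contig_memops;
 *		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 *		src_vq->lock = &ctx->dev->mutex;
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *		// ...same for dst_vq with V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 *	// in open():
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
 *	if (IS_ERR(ctx->fh.m2m_ctx))
 *		return PTR_ERR(ctx->fh.m2m_ctx);
 */
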
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
		struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b = container_of(vbuf,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

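/*
 * Example (an illustrative sketch, not part of this file): drivers
 * normally call v4l2_m2m_buf_queue() from their vb2 .buf_queue op so the
 * buffer lands on the right rdy_queue. The my_ctx type is hypothetical.
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 *	}
 */
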
void v4l2_m2m_request_queue(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;
	struct v4l2_m2m_ctx *m2m_ctx = NULL;

	/*
	 * Queue all objects. Note that buffer objects are at the end of the
	 * objects list, after all other object types. Once buffer objects
	 * are queued, the driver might delete them immediately (if the driver
	 * processes the buffer at once), so we have to use
	 * list_for_each_entry_safe() to handle the case where the object we
	 * queue is deleted.
	 */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		struct v4l2_m2m_ctx *m2m_ctx_obj;
		struct vb2_buffer *vb;

		if (!obj->ops->queue)
			continue;

		if (vb2_request_object_is_buffer(obj)) {
			/* Sanity checks */
			vb = container_of(obj, struct vb2_buffer, req_obj);
			WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
			m2m_ctx_obj = container_of(vb->vb2_queue,
						   struct v4l2_m2m_ctx,
						   out_q_ctx.q);
			WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
			m2m_ctx = m2m_ctx_obj;
		}

		/*
		 * The buffer we queue here can in theory be immediately
		 * unbound, hence the use of list_for_each_entry_safe()
		 * above and why we call the queue op last.
		 */
		obj->ops->queue(obj);
	}

	WARN_ON(!m2m_ctx);

	if (m2m_ctx)
		v4l2_m2m_try_schedule(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);

/* Videobuf2 ioctl helpers */

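/*
 * Example (an illustrative sketch, not part of this file): the helpers
 * below let a driver fill its v4l2_ioctl_ops table without per-ioctl
 * boilerplate. The my_ioctl_ops name is hypothetical.
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		...
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_prepare_buf	= v4l2_m2m_ioctl_prepare_buf,
 *		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 */
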
int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
				struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
			       struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
				struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queue.
 */

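/*
 * Example (an illustrative sketch, not part of this file): wiring the
 * helpers into a driver's file operations. The my_* names are
 * hypothetical.
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */
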
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	__poll_t ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
1115 |