/*
 * videobuf2-v4l2.c - V4L2 driver helper framework
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * The vb2_thread implementation was based on code from videobuf-dvb.c:
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>

#include <media/videobuf2-v4l2.h>
static int debug;
module_param(debug, int, 0644);
#define dprintk(q, level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			pr_info("vb2-v4l2: [%s] %s: " fmt,		\
				(q)->name, __func__, ## arg);		\
	} while (0)
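
/*
 * If this file is built as the videobuf2_v4l2 module (as in mainline),
 * the messages above can typically be enabled at runtime via the module
 * parameter created by module_param() above, e.g.:
 *
 *	echo 1 > /sys/module/videobuf2_v4l2/parameters/debug
 */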
/* Flags that are set by us */
#define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
				 V4L2_BUF_FLAG_PREPARED | \
				 V4L2_BUF_FLAG_IN_REQUEST | \
				 V4L2_BUF_FLAG_REQUEST_FD | \
				 V4L2_BUF_FLAG_TIMESTAMP_MASK)
/* Output buffer flags that should be passed on to the driver */
#define V4L2_BUFFER_OUT_FLAGS	(V4L2_BUF_FLAG_PFRAME | \
				 V4L2_BUF_FLAG_BFRAME | \
				 V4L2_BUF_FLAG_KEYFRAME | \
				 V4L2_BUF_FLAG_TIMECODE | \
				 V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF)
/*
 * __verify_planes_array() - verify that the planes array passed in struct
 * v4l2_buffer from userspace can be safely used
 */
static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
		return 0;
	/* Is memory for copying plane information present? */
	if (b->m.planes == NULL) {
		dprintk(vb->vb2_queue, 1,
			"multi-planar buffer passed but planes array not provided\n");
		return -EINVAL;
	}
	if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) {
		dprintk(vb->vb2_queue, 1,
			"incorrect planes array length, expected %d, got %d\n",
			vb->num_planes, b->length);
		return -EINVAL;
	}
	return 0;
}

static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
{
	return __verify_planes_array(vb, pb);
}
/*
 * __verify_length() - Verify that the bytesused value for each plane fits in
 * the plane length and that the data offset doesn't exceed the bytesused value.
 */
static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	unsigned int length;
	unsigned int bytesused;
	unsigned int plane;

	if (V4L2_TYPE_IS_CAPTURE(b->type))
		return 0;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		for (plane = 0; plane < vb->num_planes; ++plane) {
			length = (b->memory == VB2_MEMORY_USERPTR ||
				  b->memory == VB2_MEMORY_DMABUF)
				? b->m.planes[plane].length
				: vb->planes[plane].length;
			bytesused = b->m.planes[plane].bytesused
				  ? b->m.planes[plane].bytesused : length;
			if (b->m.planes[plane].bytesused > length)
				return -EINVAL;
			if (b->m.planes[plane].data_offset > 0 &&
			    b->m.planes[plane].data_offset >= bytesused)
				return -EINVAL;
		}
	} else {
		length = (b->memory == VB2_MEMORY_USERPTR ||
			  b->memory == VB2_MEMORY_DMABUF)
			 ? b->length : vb->planes[0].length;
		if (b->bytesused > length)
			return -EINVAL;
	}
	return 0;
}
/*
 * __init_vb2_v4l2_buffer() - initialize the vb2_v4l2_buffer struct
 */
static void __init_vb2_v4l2_buffer(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	vbuf->request_fd = -1;
}
static void __copy_timestamp(struct vb2_buffer *vb, const void *pb)
{
	const struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;

	if (q->is_output) {
		/*
		 * For output buffers copy the timestamp if needed,
		 * and the timecode field and flag if needed.
		 */
		if (q->copy_timestamp)
			vb->timestamp = v4l2_buffer_get_timestamp(b);
		vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
		if (b->flags & V4L2_BUF_FLAG_TIMECODE)
			vbuf->timecode = b->timecode;
	}
}
static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
{
	static bool check_once;
	if (check_once)
		return;
	check_once = true;
	pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
	if (vb->vb2_queue->allow_zero_bytesused)
		pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
	else
		pr_warn("use the actual size instead.\n");
}
static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_plane *planes = vbuf->planes;
	unsigned int plane;
	int ret;

	ret = __verify_length(vb, b);
	if (ret < 0) {
		dprintk(q, 1, "plane parameters verification failed: %d\n", ret);
		return ret;
	}
	if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
		/*
		 * If the format's field is ALTERNATE, then the buffer's field
		 * should be either TOP or BOTTOM, not ALTERNATE since that
		 * makes no sense. The driver has to know whether the
		 * buffer represents a top or a bottom field in order to
		 * program any DMA correctly. Using ALTERNATE is wrong, since
		 * that just says that it is either a top or a bottom field,
		 * but not which of the two it is.
		 */
		dprintk(q, 1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
		return -EINVAL;
	}
	vbuf->request_fd = -1;
	vbuf->is_held = false;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		switch (b->memory) {
		case VB2_MEMORY_USERPTR:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.userptr =
					b->m.planes[plane].m.userptr;
				planes[plane].length =
					b->m.planes[plane].length;
			}
			break;
		case VB2_MEMORY_DMABUF:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.fd =
					b->m.planes[plane].m.fd;
				planes[plane].length =
					b->m.planes[plane].length;
			}
			break;
		default:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.offset =
					vb->planes[plane].m.offset;
				planes[plane].length =
					vb->planes[plane].length;
			}
			break;
		}

		/* Fill in driver-provided information for OUTPUT types */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			/*
			 * Will have to go up to b->length when API starts
			 * accepting variable number of planes.
			 *
			 * If bytesused == 0 for the output buffer, then fall
			 * back to the full buffer size. In that case
			 * userspace clearly never bothered to set it and
			 * it's a safe assumption that they really meant to
			 * use the full plane sizes.
			 *
			 * Some drivers, e.g. old codec drivers, use bytesused == 0
			 * as a way to indicate that streaming is finished.
			 * In that case, the driver should use the
			 * allow_zero_bytesused flag to keep old userspace
			 * applications working.
			 */
			for (plane = 0; plane < vb->num_planes; ++plane) {
				struct vb2_plane *pdst = &planes[plane];
				struct v4l2_plane *psrc = &b->m.planes[plane];

				if (psrc->bytesused == 0)
					vb2_warn_zero_bytesused(vb);

				if (vb->vb2_queue->allow_zero_bytesused)
					pdst->bytesused = psrc->bytesused;
				else
					pdst->bytesused = psrc->bytesused ?
						psrc->bytesused : pdst->length;
				pdst->data_offset = psrc->data_offset;
			}
		}
	} else {
		/*
		 * Single-planar buffers do not use planes array,
		 * so fill in relevant v4l2_buffer struct fields instead.
		 * In videobuf we use our internal V4l2_planes struct for
		 * single-planar buffers as well, for simplicity.
		 *
		 * If bytesused == 0 for the output buffer, then fall back
		 * to the full buffer size as that's a sensible default.
		 *
		 * Some drivers, e.g. old codec drivers, use bytesused == 0 as
		 * a way to indicate that streaming is finished. In that case,
		 * the driver should use the allow_zero_bytesused flag to keep
		 * old userspace applications working.
		 */
		switch (b->memory) {
		case VB2_MEMORY_USERPTR:
			planes[0].m.userptr = b->m.userptr;
			planes[0].length = b->length;
			break;
		case VB2_MEMORY_DMABUF:
			planes[0].m.fd = b->m.fd;
			planes[0].length = b->length;
			break;
		default:
			planes[0].m.offset = vb->planes[0].m.offset;
			planes[0].length = vb->planes[0].length;
			break;
		}

		planes[0].data_offset = 0;
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			if (b->bytesused == 0)
				vb2_warn_zero_bytesused(vb);

			if (vb->vb2_queue->allow_zero_bytesused)
				planes[0].bytesused = b->bytesused;
			else
				planes[0].bytesused = b->bytesused ?
					b->bytesused : planes[0].length;
		} else
			planes[0].bytesused = 0;
	}

	/* Zero flags that we handle */
	vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
	if (!vb->vb2_queue->copy_timestamp || V4L2_TYPE_IS_CAPTURE(b->type)) {
		/*
		 * Non-COPY timestamps and non-OUTPUT queues will get
		 * their timestamp and timestamp source flags from the
		 * queue.
		 */
		vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	if (V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * For output buffers mask out the timecode flag:
		 * this will be handled later in vb2_qbuf().
		 * The 'field' is valid metadata for this output buffer
		 * and so that needs to be copied here.
		 */
		vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
		vbuf->field = b->field;
		if (!(q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
			vbuf->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	} else {
		/* Zero any output buffer flags as this is a capture buffer */
		vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
		/* Zero last flag, this is a signal from driver to userspace */
		vbuf->flags &= ~V4L2_BUF_FLAG_LAST;
	}

	return 0;
}
static void set_buffer_cache_hints(struct vb2_queue *q,
				   struct vb2_buffer *vb,
				   struct v4l2_buffer *b)
{
	/*
	 * DMA exporter should take care of cache syncs, so we can avoid
	 * explicit ->prepare()/->finish() syncs. For other ->memory types
	 * we always need ->prepare() or/and ->finish() cache sync.
	 */
	if (q->memory == VB2_MEMORY_DMABUF) {
		vb->need_cache_sync_on_finish = 0;
		vb->need_cache_sync_on_prepare = 0;
		return;
	}

	/*
	 * Cache sync/invalidation flags are set by default in order to
	 * preserve existing behaviour for old apps/drivers.
	 */
	vb->need_cache_sync_on_prepare = 1;
	vb->need_cache_sync_on_finish = 1;

	if (!vb2_queue_allows_cache_hints(q)) {
		/*
		 * Clear buffer cache flags if queue does not support user
		 * space hints. That's to indicate to userspace that these
		 * flags won't work.
		 */
		b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_INVALIDATE;
		b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_CLEAN;
		return;
	}

	/*
	 * ->finish() cache sync can be avoided when queue direction is
	 * TO_DEVICE.
	 */
	if (q->dma_dir == DMA_TO_DEVICE)
		vb->need_cache_sync_on_finish = 0;

	if (b->flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE)
		vb->need_cache_sync_on_finish = 0;

	if (b->flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN)
		vb->need_cache_sync_on_prepare = 0;
}
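
/*
 * Illustrative userspace sketch (not part of this file): an application
 * that manages cache coherency itself may ask vb2 to skip the cache sync,
 * provided the queue advertises V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS:
 *
 *	struct v4l2_buffer buf = {
 *		.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_MMAP,
 *		.index  = 0,
 *		.flags  = V4L2_BUF_FLAG_NO_CACHE_INVALIDATE |
 *			  V4L2_BUF_FLAG_NO_CACHE_CLEAN,
 *	};
 *
 *	ioctl(fd, VIDIOC_QBUF, &buf);
 */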
static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
				    struct v4l2_buffer *b, bool is_prepare,
				    struct media_request **p_req)
{
	const char *opname = is_prepare ? "prepare_buf" : "qbuf";
	struct media_request *req;
	struct vb2_v4l2_buffer *vbuf;
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(q, 1, "%s: invalid buffer type\n", opname);
		return -EINVAL;
	}
	if (b->index >= q->num_buffers) {
		dprintk(q, 1, "%s: buffer index out of range\n", opname);
		return -EINVAL;
	}
	if (q->bufs[b->index] == NULL) {
		/* Should never happen */
		dprintk(q, 1, "%s: buffer is NULL\n", opname);
		return -EINVAL;
	}
	if (b->memory != q->memory) {
		dprintk(q, 1, "%s: invalid memory type\n", opname);
		return -EINVAL;
	}

	vb = q->bufs[b->index];
	vbuf = to_vb2_v4l2_buffer(vb);
	ret = __verify_planes_array(vb, b);
	if (ret)
		return ret;

	if (!is_prepare && (b->flags & V4L2_BUF_FLAG_REQUEST_FD) &&
	    vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(q, 1, "%s: buffer is not in dequeued state\n", opname);
		return -EINVAL;
	}

	if (!vb->prepared) {
		set_buffer_cache_hints(q, vb, b);
		/* Copy relevant information provided by the userspace */
		memset(vbuf->planes, 0,
		       sizeof(vbuf->planes[0]) * vb->num_planes);
		ret = vb2_fill_vb2_v4l2_buffer(vb, b);
		if (ret)
			return ret;
	}

	if (is_prepare)
		return 0;

	if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
		if (q->requires_requests) {
			dprintk(q, 1, "%s: queue requires requests\n", opname);
			return -EBADR;
		}
		if (q->uses_requests) {
			dprintk(q, 1, "%s: queue uses requests\n", opname);
			return -EBUSY;
		}
		return 0;
	} else if (!q->supports_requests) {
		dprintk(q, 1, "%s: queue does not support requests\n", opname);
		return -EBADR;
	} else if (q->uses_qbuf) {
		dprintk(q, 1, "%s: queue does not use requests\n", opname);
		return -EBUSY;
	}

	/*
	 * For proper locking when queueing a request you need to be able
	 * to lock access to the vb2 queue, so check that there is a lock
	 * that we can use. In addition p_req must be non-NULL.
	 */
	if (WARN_ON(!q->lock || !p_req))
		return -EINVAL;

	/*
	 * Make sure this op is implemented by the driver. It's easy to forget
	 * this callback, but it is important when canceling a buffer in a
	 * queued request.
	 */
	if (WARN_ON(!q->ops->buf_request_complete))
		return -EINVAL;

	/*
	 * Make sure this op is implemented by the driver for the output queue.
	 * It's easy to forget this callback, but it is important to correctly
	 * validate the 'field' value at QBUF time.
	 */
	if (WARN_ON((q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
		     q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
		    !q->ops->buf_out_validate))
		return -EINVAL;

	if (b->request_fd < 0) {
		dprintk(q, 1, "%s: request_fd < 0\n", opname);
		return -EINVAL;
	}
	req = media_request_get_by_fd(mdev, b->request_fd);
	if (IS_ERR(req)) {
		dprintk(q, 1, "%s: invalid request_fd\n", opname);
		return PTR_ERR(req);
	}

	/*
	 * Early sanity check. This is checked again when the buffer
	 * is bound to the request in vb2_core_qbuf().
	 */
	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
	    req->state != MEDIA_REQUEST_STATE_UPDATING) {
		dprintk(q, 1, "%s: request is not idle\n", opname);
		media_request_put(req);
		return -EBUSY;
	}
	*p_req = req;
	vbuf->request_fd = b->request_fd;
	return 0;
}
/*
 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
 * returned to userspace
 */
static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
{
	struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;

	/* Copy back data such as timestamp, flags, etc. */
	b->index = vb->index;
	b->memory = vb->memory;
	b->flags = vbuf->flags;
	b->field = vbuf->field;
	v4l2_buffer_set_timestamp(b, vb->timestamp);
	b->timecode = vbuf->timecode;
	b->sequence = vbuf->sequence;

	if (q->is_multiplanar) {
		/*
		 * Fill in plane-related data if userspace provided an array
		 * for it. The caller has already verified memory and size.
		 */
		b->length = vb->num_planes;
		for (plane = 0; plane < vb->num_planes; ++plane) {
			struct v4l2_plane *pdst = &b->m.planes[plane];
			struct vb2_plane *psrc = &vb->planes[plane];

			pdst->bytesused = psrc->bytesused;
			pdst->length = psrc->length;
			if (q->memory == VB2_MEMORY_MMAP)
				pdst->m.mem_offset = psrc->m.offset;
			else if (q->memory == VB2_MEMORY_USERPTR)
				pdst->m.userptr = psrc->m.userptr;
			else if (q->memory == VB2_MEMORY_DMABUF)
				pdst->m.fd = psrc->m.fd;
			pdst->data_offset = psrc->data_offset;
			memset(pdst->reserved, 0, sizeof(pdst->reserved));
		}
	} else {
		/*
		 * We use length and offset in v4l2_planes array even for
		 * single-planar buffers, but userspace does not.
		 */
		b->length = vb->planes[0].length;
		b->bytesused = vb->planes[0].bytesused;
		if (q->memory == VB2_MEMORY_MMAP)
			b->m.offset = vb->planes[0].m.offset;
		else if (q->memory == VB2_MEMORY_USERPTR)
			b->m.userptr = vb->planes[0].m.userptr;
		else if (q->memory == VB2_MEMORY_DMABUF)
			b->m.fd = vb->planes[0].m.fd;
	}

	/*
	 * Clear any buffer state related flags.
	 */
	b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
	b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
	if (!q->copy_timestamp) {
		/*
		 * For non-COPY timestamps, drop timestamp source bits
		 * and obtain the timestamp source from the queue.
		 */
		b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	switch (vb->state) {
	case VB2_BUF_STATE_QUEUED:
	case VB2_BUF_STATE_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VB2_BUF_STATE_IN_REQUEST:
		b->flags |= V4L2_BUF_FLAG_IN_REQUEST;
		break;
	case VB2_BUF_STATE_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		fallthrough;
	case VB2_BUF_STATE_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VB2_BUF_STATE_PREPARING:
	case VB2_BUF_STATE_DEQUEUED:
		/* nothing */
		break;
	}

	if ((vb->state == VB2_BUF_STATE_DEQUEUED ||
	     vb->state == VB2_BUF_STATE_IN_REQUEST) &&
	    vb->synced && vb->prepared)
		b->flags |= V4L2_BUF_FLAG_PREPARED;

	if (vb2_buffer_in_use(q, vb))
		b->flags |= V4L2_BUF_FLAG_MAPPED;
	if (vbuf->request_fd >= 0) {
		b->flags |= V4L2_BUF_FLAG_REQUEST_FD;
		b->request_fd = vbuf->request_fd;
	}
}
/*
 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
 * v4l2_buffer by the userspace. It also verifies that struct
 * v4l2_buffer has a valid number of planes.
 */
static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	unsigned int plane;

	if (!vb->vb2_queue->copy_timestamp)
		vb->timestamp = 0;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->vb2_queue->memory != VB2_MEMORY_MMAP) {
			planes[plane].m = vbuf->planes[plane].m;
			planes[plane].length = vbuf->planes[plane].length;
		}
		planes[plane].bytesused = vbuf->planes[plane].bytesused;
		planes[plane].data_offset = vbuf->planes[plane].data_offset;
	}
	return 0;
}

static const struct vb2_buf_ops v4l2_buf_ops = {
	.verify_planes_array	= __verify_planes_array_core,
	.init_buffer		= __init_vb2_v4l2_buffer,
	.fill_user_buffer	= __fill_v4l2_buffer,
	.fill_vb2_buffer	= __fill_vb2_buffer,
	.copy_timestamp		= __copy_timestamp,
};
int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp,
		       unsigned int start_idx)
{
	unsigned int i;

	for (i = start_idx; i < q->num_buffers; i++)
		if (q->bufs[i]->copied_timestamp &&
		    q->bufs[i]->timestamp == timestamp)
			return i;
	return -1;
}
EXPORT_SYMBOL_GPL(vb2_find_timestamp);
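
/*
 * Illustrative sketch (hypothetical driver code, not from this file): a
 * stateful codec driver can use vb2_find_timestamp() to locate the CAPTURE
 * buffer(s) whose timestamp was copied from a given OUTPUT buffer:
 *
 *	u64 ts = src_vb->vb2_buf.timestamp;
 *	int idx = vb2_find_timestamp(cap_q, ts, 0);
 *
 * A non-negative idx identifies a matching buffer; passing idx + 1 as
 * start_idx finds any further buffers carrying the same timestamp.
 */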
/*
 * vb2_querybuf() - query video buffer information
 * @b:	buffer struct passed from userspace to vidioc_querybuf handler
 *
 * Should be called from vidioc_querybuf ioctl handler in driver.
 * This function will verify the passed v4l2_buffer structure and fill the
 * relevant information for the userspace.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_querybuf handler in driver.
 */
int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(q, 1, "wrong buffer type\n");
		return -EINVAL;
	}
	if (b->index >= q->num_buffers) {
		dprintk(q, 1, "buffer index out of range\n");
		return -EINVAL;
	}
	vb = q->bufs[b->index];
	ret = __verify_planes_array(vb, b);
	if (!ret)
		vb2_core_querybuf(q, b->index, b);
	return ret;
}
EXPORT_SYMBOL(vb2_querybuf);
static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
{
	*caps = V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS;
	if (q->io_modes & VB2_MMAP)
		*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP;
	if (q->io_modes & VB2_USERPTR)
		*caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
	if (q->io_modes & VB2_DMABUF)
		*caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
	if (q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)
		*caps |= V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
	if (q->allow_cache_hints && q->io_modes & VB2_MMAP)
		*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS;
#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
	if (q->supports_requests)
		*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
#endif
}
int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
	int ret = vb2_verify_memory_type(q, req->memory, req->type);

	fill_buf_caps(q, &req->capabilities);
	return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count);
}
EXPORT_SYMBOL_GPL(vb2_reqbufs);
int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
		    struct v4l2_buffer *b)
{
	int ret;
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
		return -EINVAL;
	ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL);
	return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
}
EXPORT_SYMBOL_GPL(vb2_prepare_buf);
int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
{
	unsigned requested_planes = 1;
	unsigned requested_sizes[VIDEO_MAX_PLANES];
	struct v4l2_format *f = &create->format;
	int ret = vb2_verify_memory_type(q, create->memory, f->type);
	unsigned int i;

	fill_buf_caps(q, &create->capabilities);
	create->index = q->num_buffers;
	if (create->count == 0)
		return ret != -EBUSY ? ret : 0;

	switch (f->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		requested_planes = f->fmt.pix_mp.num_planes;
		if (requested_planes == 0 ||
		    requested_planes > VIDEO_MAX_PLANES)
			return -EINVAL;
		for (i = 0; i < requested_planes; i++)
			requested_sizes[i] =
				f->fmt.pix_mp.plane_fmt[i].sizeimage;
		break;
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		requested_sizes[0] = f->fmt.pix.sizeimage;
		break;
	case V4L2_BUF_TYPE_VBI_CAPTURE:
	case V4L2_BUF_TYPE_VBI_OUTPUT:
		requested_sizes[0] = f->fmt.vbi.samples_per_line *
			(f->fmt.vbi.count[0] + f->fmt.vbi.count[1]);
		break;
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		requested_sizes[0] = f->fmt.sliced.io_size;
		break;
	case V4L2_BUF_TYPE_SDR_CAPTURE:
	case V4L2_BUF_TYPE_SDR_OUTPUT:
		requested_sizes[0] = f->fmt.sdr.buffersize;
		break;
	case V4L2_BUF_TYPE_META_CAPTURE:
	case V4L2_BUF_TYPE_META_OUTPUT:
		requested_sizes[0] = f->fmt.meta.buffersize;
		break;
	default:
		return -EINVAL;
	}
	for (i = 0; i < requested_planes; i++)
		if (requested_sizes[i] == 0)
			return -EINVAL;
	return ret ? ret : vb2_core_create_bufs(q, create->memory,
						&create->count,
						requested_planes,
						requested_sizes);
}
EXPORT_SYMBOL_GPL(vb2_create_bufs);
int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
	     struct v4l2_buffer *b)
{
	struct media_request *req = NULL;
	int ret;
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req);
	if (ret)
		return ret;
	ret = vb2_core_qbuf(q, b->index, b, req);
	if (req)
		media_request_put(req);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_qbuf);
int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
	int ret;
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	if (b->type != q->type) {
		dprintk(q, 1, "invalid buffer type\n");
		return -EINVAL;
	}
	ret = vb2_core_dqbuf(q, NULL, b, nonblocking);
	if (!q->is_output &&
	    b->flags & V4L2_BUF_FLAG_DONE &&
	    b->flags & V4L2_BUF_FLAG_LAST)
		q->last_buffer_dequeued = true;
	/*
	 * After calling the VIDIOC_DQBUF V4L2_BUF_FLAG_DONE must be
	 * cleared.
	 */
	b->flags &= ~V4L2_BUF_FLAG_DONE;
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_dqbuf);
int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return vb2_core_streamon(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamon);

int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return vb2_core_streamoff(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamoff);

int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
{
	return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index,
			       eb->plane, eb->flags);
}
EXPORT_SYMBOL_GPL(vb2_expbuf);
int vb2_queue_init_name(struct vb2_queue *q, const char *name)
{
	/*
	 * Sanity check
	 */
	if (WARN_ON(!q) ||
	    WARN_ON(q->timestamp_flags &
		    ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
		      V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
		return -EINVAL;

	/* Warn that the driver should choose an appropriate timestamp type */
	WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
		V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);

	/* Warn that vb2_memory should match with v4l2_memory */
	if (WARN_ON(VB2_MEMORY_MMAP != (int)V4L2_MEMORY_MMAP)
		|| WARN_ON(VB2_MEMORY_USERPTR != (int)V4L2_MEMORY_USERPTR)
		|| WARN_ON(VB2_MEMORY_DMABUF != (int)V4L2_MEMORY_DMABUF))
		return -EINVAL;

	if (q->buf_struct_size == 0)
		q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);

	q->buf_ops = &v4l2_buf_ops;
	q->is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
	q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
	q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
			== V4L2_BUF_FLAG_TIMESTAMP_COPY;
	/*
	 * For compatibility with vb1: if QBUF hasn't been called yet, then
	 * return EPOLLERR as well. This only affects capture queues, output
	 * queues will always initialize waiting_for_buffers to false.
	 */
	q->quirk_poll_must_check_waiting_for_buffers = true;

	if (name)
		strscpy(q->name, name, sizeof(q->name));
	else
		q->name[0] = '\0';

	return vb2_core_queue_init(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_init_name);

int vb2_queue_init(struct vb2_queue *q)
{
	return vb2_queue_init_name(q, NULL);
}
EXPORT_SYMBOL_GPL(vb2_queue_init);
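
/*
 * Illustrative sketch (hypothetical foo_* names): a typical capture driver
 * fills in the fields checked above before calling vb2_queue_init():
 *
 *	struct vb2_queue *q = &foo->queue;
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
 *	q->drv_priv = foo;
 *	q->buf_struct_size = sizeof(struct foo_buffer);
 *	q->ops = &foo_qops;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *	q->lock = &foo->lock;
 *
 *	ret = vb2_queue_init(q);
 */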
void vb2_queue_release(struct vb2_queue *q)
{
	vb2_core_queue_release(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_release);

__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
	struct video_device *vfd = video_devdata(file);
	__poll_t res;

	res = vb2_core_poll(q, file, wait);
	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		poll_wait(file, &fh->wait, wait);
		if (v4l2_event_pending(fh))
			res |= EPOLLPRI;
	}
	return res;
}
EXPORT_SYMBOL_GPL(vb2_poll);
/*
 * The following functions are not part of the vb2 core API, but are helper
 * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
 * and struct vb2_ops.
 * They contain boilerplate code that most if not all drivers have to do
 * and so they simplify the driver code.
 */

/* The queue is busy if there is an owner and you are not that owner. */
static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
{
	return vdev->queue->owner && vdev->queue->owner != file->private_data;
}
/* vb2 ioctl helpers */

int vb2_ioctl_reqbufs(struct file *file, void *priv,
			  struct v4l2_requestbuffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);

	fill_buf_caps(vdev->queue, &p->capabilities);
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	res = vb2_core_reqbufs(vdev->queue, p->memory, &p->count);
	/*
	 * If count == 0, then the owner has released all buffers and is
	 * no longer owner of the queue. Otherwise we have a new owner.
	 */
	if (res == 0)
		vdev->queue->owner = p->count ? file->private_data : NULL;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
int vb2_ioctl_create_bufs(struct file *file, void *priv,
			  struct v4l2_create_buffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = vb2_verify_memory_type(vdev->queue, p->memory,
					 p->format.type);

	p->index = vdev->queue->num_buffers;
	fill_buf_caps(vdev->queue, &p->capabilities);
	/*
	 * If count == 0, then just check if memory and type are valid.
	 * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
	 */
	if (p->count == 0)
		return res != -EBUSY ? res : 0;
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;

	res = vb2_create_bufs(vdev->queue, p);
	if (res == 0)
		vdev->queue->owner = file->private_data;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
int vb2_ioctl_prepare_buf(struct file *file, void *priv,
			  struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_prepare_buf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);

int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	/* No need to call vb2_queue_is_busy(), anyone can query buffers. */
	return vb2_querybuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);

int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_qbuf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);

int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);

int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_streamon(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);

int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_streamoff(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);

int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_expbuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
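
/*
 * Illustrative sketch (hypothetical foo_ioctl_ops): drivers normally plug
 * the helpers above straight into their v4l2_ioctl_ops:
 *
 *	static const struct v4l2_ioctl_ops foo_ioctl_ops = {
 *		...
 *		.vidioc_reqbufs     = vb2_ioctl_reqbufs,
 *		.vidioc_create_bufs = vb2_ioctl_create_bufs,
 *		.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
 *		.vidioc_querybuf    = vb2_ioctl_querybuf,
 *		.vidioc_qbuf        = vb2_ioctl_qbuf,
 *		.vidioc_dqbuf       = vb2_ioctl_dqbuf,
 *		.vidioc_expbuf      = vb2_ioctl_expbuf,
 *		.vidioc_streamon    = vb2_ioctl_streamon,
 *		.vidioc_streamoff   = vb2_ioctl_streamoff,
 *	};
 */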
/* v4l2_file_operations helpers */

int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_mmap(vdev->queue, vma);
}
EXPORT_SYMBOL_GPL(vb2_fop_mmap);

int _vb2_fop_release(struct file *file, struct mutex *lock)
{
	struct video_device *vdev = video_devdata(file);

	if (lock)
		mutex_lock(lock);
	if (file->private_data == vdev->queue->owner) {
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
	}
	if (lock)
		mutex_unlock(lock);
	return v4l2_fh_release(file);
}
EXPORT_SYMBOL_GPL(_vb2_fop_release);

int vb2_fop_release(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;

	return _vb2_fop_release(file, lock);
}
EXPORT_SYMBOL_GPL(vb2_fop_release);
ssize_t vb2_fop_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (!(vdev->queue->io_modes & VB2_WRITE))
		return -EINVAL;
	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev, file))
		goto exit;
	err = vb2_write(vdev->queue, buf, count, ppos,
		       file->f_flags & O_NONBLOCK);
	if (vdev->queue->fileio)
		vdev->queue->owner = file->private_data;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_write);
ssize_t vb2_fop_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (!(vdev->queue->io_modes & VB2_READ))
		return -EINVAL;
	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev, file))
		goto exit;
	err = vb2_read(vdev->queue, buf, count, ppos,
		       file->f_flags & O_NONBLOCK);
	if (vdev->queue->fileio)
		vdev->queue->owner = file->private_data;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_read);
__poll_t vb2_fop_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *q = vdev->queue;
	struct mutex *lock = q->lock ? q->lock : vdev->lock;
	__poll_t res;
	void *fileio;

	/*
	 * If this helper doesn't know how to lock, then you shouldn't be using
	 * it but you should write your own.
	 */
	WARN_ON(!lock);
	if (lock && mutex_lock_interruptible(lock))
		return EPOLLERR;

	fileio = q->fileio;
	res = vb2_poll(vdev->queue, file, wait);

	/* If fileio was started, then we have a new queue owner. */
	if (!fileio && q->fileio)
		q->owner = file->private_data;
	if (lock)
		mutex_unlock(lock);
	return res;
}
EXPORT_SYMBOL_GPL(vb2_fop_poll);
unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
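
/*
 * Illustrative sketch (hypothetical foo_fops): with vdev->queue and
 * vdev->lock set, the file-operation helpers above can be used directly
 * in a driver's v4l2_file_operations:
 *
 *	static const struct v4l2_file_operations foo_fops = {
 *		.owner          = THIS_MODULE,
 *		.open           = v4l2_fh_open,
 *		.release        = vb2_fop_release,
 *		.read           = vb2_fop_read,
 *		.poll           = vb2_fop_poll,
 *		.mmap           = vb2_fop_mmap,
 *		.unlocked_ioctl = video_ioctl2,
 *	};
 */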
void vb2_video_unregister_device(struct video_device *vdev)
{
	/* Check if vdev was ever registered at all */
	if (!vdev || !video_is_registered(vdev))
		return;

	/*
	 * Calling this function only makes sense if vdev->queue is set.
	 * If it is NULL, then just call video_unregister_device() instead.
	 */
	WARN_ON(!vdev->queue);

	/*
	 * Take a reference to the device since video_unregister_device()
	 * calls device_unregister(), but we don't want that to release
	 * the device since we want to clean up the queue first.
	 */
	get_device(&vdev->dev);
	video_unregister_device(vdev);
	if (vdev->queue && vdev->queue->owner) {
		struct mutex *lock = vdev->queue->lock ?
			vdev->queue->lock : vdev->lock;

		if (lock)
			mutex_lock(lock);
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
		if (lock)
			mutex_unlock(lock);
	}
	/*
	 * Now we put the device, and in most cases this will release
	 * the video_device structure.
	 */
	put_device(&vdev->dev);
}
EXPORT_SYMBOL_GPL(vb2_video_unregister_device);
/* vb2_ops helpers. Only use if vq->lock is non-NULL. */

void vb2_ops_wait_prepare(struct vb2_queue *vq)
{
	mutex_unlock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);

void vb2_ops_wait_finish(struct vb2_queue *vq)
{
	mutex_lock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
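
/*
 * Illustrative sketch (hypothetical foo_* names): with vq->lock non-NULL,
 * a driver can delegate the wait_prepare/wait_finish callbacks to the two
 * helpers above:
 *
 *	static const struct vb2_ops foo_qops = {
 *		.queue_setup     = foo_queue_setup,
 *		.buf_queue       = foo_buf_queue,
 *		.start_streaming = foo_start_streaming,
 *		.stop_streaming  = foo_stop_streaming,
 *		.wait_prepare    = vb2_ops_wait_prepare,
 *		.wait_finish     = vb2_ops_wait_finish,
 *	};
 */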
/*
 * Note that this function is called during validation time and
 * thus the req_queue_mutex is held to ensure no request objects
 * can be added or deleted while validating. So there is no need
 * to protect the objects list.
 */
int vb2_request_validate(struct media_request *req)
{
	struct media_request_object *obj;
	int ret = 0;

	if (!vb2_request_buffer_cnt(req))
		return -ENOENT;

	list_for_each_entry(obj, &req->objects, list) {
		if (!obj->ops->prepare)
			continue;
		ret = obj->ops->prepare(obj);
		if (ret)
			break;
	}

	if (ret) {
		list_for_each_entry_continue_reverse(obj, &req->objects, list)
			if (obj->ops->unprepare)
				obj->ops->unprepare(obj);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_request_validate);
void vb2_request_queue(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/*
	 * Queue all objects. Note that buffer objects are at the end of the
	 * objects list, after all other object types. Once buffer objects
	 * are queued, the driver might delete them immediately (if the driver
	 * processes the buffer at once), so we have to use
	 * list_for_each_entry_safe() to handle the case where the object we
	 * queue is deleted.
	 */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list)
		if (obj->ops->queue)
			obj->ops->queue(obj);
}
EXPORT_SYMBOL_GPL(vb2_request_queue);
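
/*
 * Illustrative sketch (hypothetical foo_* names): drivers that support
 * requests typically hook the two helpers above into their
 * media_device_ops, either directly or via a small driver wrapper:
 *
 *	static const struct media_device_ops foo_media_ops = {
 *		.req_validate = vb2_request_validate,
 *		.req_queue    = vb2_request_queue,
 *	};
 */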
MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
MODULE_LICENSE("GPL");