// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-vmalloc.h>

#include "vpu.h"
#include "vpu_core.h"
#include "vpu_v4l2.h"
#include "vpu_msgs.h"
#include "vpu_helpers.h"

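/*
 * inst->lock doubles as the vb2 queue lock for both queues (assigned in
 * vpu_m2m_queue_init() below), so these helpers serialize queue operations
 * together with the rest of the instance state.
 */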
void vpu_inst_lock(struct vpu_inst *inst)
{
	mutex_lock(&inst->lock);
}

void vpu_inst_unlock(struct vpu_inst *inst)
{
	mutex_unlock(&inst->lock);
}

dma_addr_t vpu_get_vb_phy_addr(struct vb2_buffer *vb, u32 plane_no)
{
	if (plane_no >= vb->num_planes)
		return 0;
	return vb2_dma_contig_plane_dma_addr(vb, plane_no) +
			vb->planes[plane_no].data_offset;
}

unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no)
{
	if (plane_no >= vb->num_planes)
		return 0;
	return vb2_plane_size(vb, plane_no) - vb->planes[plane_no].data_offset;
}

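/*
 * On top of the vb2 state, each vpu_vb2_buffer carries a driver-private
 * VPU_BUF_STATE_* value tracking its progress through the firmware pipeline.
 */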
void vpu_set_buffer_state(struct vb2_v4l2_buffer *vbuf, unsigned int state)
{
	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);

	vpu_buf->state = state;
}

unsigned int vpu_get_buffer_state(struct vb2_v4l2_buffer *vbuf)
{
	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);

	return vpu_buf->state;
}

void vpu_set_buffer_average_qp(struct vb2_v4l2_buffer *vbuf, u32 qp)
{
	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);

	vpu_buf->average_qp = qp;
}

void vpu_v4l2_set_error(struct vpu_inst *inst)
{
	vpu_inst_lock(inst);
	dev_err(inst->dev, "some error occurred in codec\n");
	if (inst->fh.m2m_ctx) {
		vb2_queue_error(v4l2_m2m_get_src_vq(inst->fh.m2m_ctx));
		vb2_queue_error(v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx));
	}
	vpu_inst_unlock(inst);
}

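/* Queue a V4L2_EVENT_EOS event on the file handle to signal end-of-stream. */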
int vpu_notify_eos(struct vpu_inst *inst)
{
	static const struct v4l2_event ev = {
		.id = 0,
		.type = V4L2_EVENT_EOS
	};

	vpu_trace(inst->dev, "[%d]\n", inst->id);
	v4l2_event_queue_fh(&inst->fh, &ev);

	return 0;
}

int vpu_notify_source_change(struct vpu_inst *inst)
{
	static const struct v4l2_event ev = {
		.id = 0,
		.type = V4L2_EVENT_SOURCE_CHANGE,
		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION
	};

	vpu_trace(inst->dev, "[%d]\n", inst->id);
	v4l2_event_queue_fh(&inst->fh, &ev);
	return 0;
}

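/*
 * Flag the capture queue as "last buffer dequeued" so that further DQBUF
 * calls fail with -EPIPE, the V4L2 way of reporting end-of-stream, and
 * optionally queue the EOS event as well.
 */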
int vpu_set_last_buffer_dequeued(struct vpu_inst *inst, bool eos)
{
	struct vb2_queue *q;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
	if (!list_empty(&q->done_list))
		return -EINVAL;

	if (q->last_buffer_dequeued)
		return 0;
	vpu_trace(inst->dev, "last buffer dequeued\n");
	q->last_buffer_dequeued = true;
	wake_up(&q->done_wq);
	if (eos)
		vpu_notify_eos(inst);
	return 0;
}

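/*
 * The source queue counts as empty when none of its buffers is still in the
 * IDLE state, i.e. everything queued so far has been consumed.
 */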
bool vpu_is_source_empty(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;

	if (!inst->fh.m2m_ctx)
		return true;
	v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
		if (vpu_get_buffer_state(&buf->vb) == VPU_BUF_STATE_IDLE)
			return false;
	}

	return true;
}

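/*
 * Fill @fmt with the driver's format info for the requested pixelformat,
 * falling back to the first supported format when the request is unknown.
 */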
static int vpu_init_format(struct vpu_inst *inst, struct vpu_format *fmt)
{
	const struct vpu_format *info;

	info = vpu_helper_find_format(inst, fmt->type, fmt->pixfmt);
	if (!info) {
		info = vpu_helper_enum_format(inst, fmt->type, 0);
		if (!info)
			return -EINVAL;
	}
	memcpy(fmt, info, sizeof(*fmt));

	return 0;
}

static int vpu_calc_fmt_bytesperline(struct v4l2_format *f, struct vpu_format *fmt)
{
	struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
	int i;

	if (fmt->flags & V4L2_FMT_FLAG_COMPRESSED) {
		for (i = 0; i < fmt->comp_planes; i++)
			fmt->bytesperline[i] = 0;
		return 0;
	}
	if (pixmp->num_planes == fmt->comp_planes) {
		for (i = 0; i < fmt->comp_planes; i++)
			fmt->bytesperline[i] = pixmp->plane_fmt[i].bytesperline;
		return 0;
	}
	if (pixmp->num_planes > 1)
		return -EINVAL;

	/*
	 * The Amphion VPU only supports NV12 and NV12 tiled, so the
	 * bytesperline of luma and chroma must be the same.
	 */
	for (i = 0; i < fmt->comp_planes; i++)
		fmt->bytesperline[i] = pixmp->plane_fmt[0].bytesperline;

	return 0;
}

static int vpu_calc_fmt_sizeimage(struct vpu_inst *inst, struct vpu_format *fmt)
{
	u32 stride = 1;
	int i;

	if (!(fmt->flags & V4L2_FMT_FLAG_COMPRESSED)) {
		const struct vpu_core_resources *res = vpu_get_resource(inst);

		if (res)
			stride = res->stride;
	}

	for (i = 0; i < fmt->comp_planes; i++) {
		fmt->sizeimage[i] = vpu_helper_get_plane_size(fmt->pixfmt,
							      fmt->width,
							      fmt->height,
							      i,
							      stride,
							      fmt->field != V4L2_FIELD_NONE ? 1 : 0,
							      &fmt->bytesperline[i]);
		fmt->sizeimage[i] = max_t(u32, fmt->sizeimage[i], PAGE_SIZE);
		if (fmt->flags & V4L2_FMT_FLAG_COMPRESSED) {
			fmt->sizeimage[i] = clamp_val(fmt->sizeimage[i], SZ_128K, SZ_8M);
			fmt->bytesperline[i] = 0;
		}
	}

	return 0;
}

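/*
 * A pixel format may pack several component planes (e.g. NV12 luma plus
 * chroma) into fewer memory planes; the last memory plane then absorbs the
 * sizes of all remaining component planes.
 */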
u32 vpu_get_fmt_plane_size(struct vpu_format *fmt, u32 plane_no)
{
	u32 size;
	int i;

	if (plane_no >= fmt->mem_planes)
		return 0;

	if (fmt->comp_planes == fmt->mem_planes)
		return fmt->sizeimage[plane_no];
	if (plane_no < fmt->mem_planes - 1)
		return fmt->sizeimage[plane_no];

	size = fmt->sizeimage[plane_no];
	for (i = fmt->mem_planes; i < fmt->comp_planes; i++)
		size += fmt->sizeimage[i];

	return size;
}

int vpu_try_fmt_common(struct vpu_inst *inst, struct v4l2_format *f, struct vpu_format *fmt)
{
	struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
	int i;
	int ret;

	fmt->pixfmt = pixmp->pixelformat;
	fmt->type = f->type;
	ret = vpu_init_format(inst, fmt);
	if (ret < 0)
		return ret;

	fmt->width = pixmp->width;
	fmt->height = pixmp->height;
	if (fmt->width)
		fmt->width = vpu_helper_valid_frame_width(inst, fmt->width);
	if (fmt->height)
		fmt->height = vpu_helper_valid_frame_height(inst, fmt->height);
	fmt->field = pixmp->field == V4L2_FIELD_ANY ? V4L2_FIELD_NONE : pixmp->field;
	vpu_calc_fmt_bytesperline(f, fmt);
	vpu_calc_fmt_sizeimage(inst, fmt);
	if ((fmt->flags & V4L2_FMT_FLAG_COMPRESSED) && pixmp->plane_fmt[0].sizeimage)
		fmt->sizeimage[0] = clamp_val(pixmp->plane_fmt[0].sizeimage, SZ_128K, SZ_8M);

	pixmp->pixelformat = fmt->pixfmt;
	pixmp->width = fmt->width;
	pixmp->height = fmt->height;
	pixmp->flags = fmt->flags;
	pixmp->num_planes = fmt->mem_planes;
	pixmp->field = fmt->field;
	memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
	for (i = 0; i < pixmp->num_planes; i++) {
		pixmp->plane_fmt[i].bytesperline = fmt->bytesperline[i];
		pixmp->plane_fmt[i].sizeimage = vpu_get_fmt_plane_size(fmt, i);
		memset(pixmp->plane_fmt[i].reserved, 0, sizeof(pixmp->plane_fmt[i].reserved));
	}

	return 0;
}

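/*
 * A buffer may only be handed to the firmware once the instance has left the
 * DEINIT state and holds a valid instance id; codecs can add further
 * constraints via the optional check_ready hook.
 */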
static bool vpu_check_ready(struct vpu_inst *inst, u32 type)
{
	if (!inst)
		return false;
	if (inst->state == VPU_CODEC_STATE_DEINIT || inst->id < 0)
		return false;
	if (!inst->ops->check_ready)
		return true;
	return call_vop(inst, check_ready, type);
}

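/*
 * vpu_process_output_buffer() and vpu_process_capture_buffer() pick the
 * first buffer still in the IDLE state on the respective queue and hand it
 * to the per-codec process_output/process_capture hook.
 */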
int vpu_process_output_buffer(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (!vpu_check_ready(inst, inst->out_format.type))
		return -EINVAL;

	v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
		vbuf = &buf->vb;
		if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_IDLE)
			break;
		vbuf = NULL;
	}

	if (!vbuf)
		return -EINVAL;

	dev_dbg(inst->dev, "[%d]frame id = %d / %d\n",
		inst->id, vbuf->sequence, inst->sequence);
	return call_vop(inst, process_output, &vbuf->vb2_buf);
}

int vpu_process_capture_buffer(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (!vpu_check_ready(inst, inst->cap_format.type))
		return -EINVAL;

	v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
		vbuf = &buf->vb;
		if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_IDLE)
			break;
		vbuf = NULL;
	}
	if (!vbuf)
		return -EINVAL;

	return call_vop(inst, process_capture, &vbuf->vb2_buf);
}

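/*
 * Return the next source buffer to process, retiring any codec-config
 * (header) buffers at the head of the queue: those are completed
 * immediately since they carry no frame of their own.
 */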
struct vb2_v4l2_buffer *vpu_next_src_buf(struct vpu_inst *inst)
{
	struct vb2_v4l2_buffer *src_buf = NULL;

	if (!inst->fh.m2m_ctx)
		return NULL;

	src_buf = v4l2_m2m_next_src_buf(inst->fh.m2m_ctx);
	if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
		return NULL;

	while (vpu_vb_is_codecconfig(src_buf)) {
		v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx);
		vpu_set_buffer_state(src_buf, VPU_BUF_STATE_IDLE);
		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);

		src_buf = v4l2_m2m_next_src_buf(inst->fh.m2m_ctx);
		if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
			return NULL;
	}

	return src_buf;
}

void vpu_skip_frame(struct vpu_inst *inst, int count)
{
	struct vb2_v4l2_buffer *src_buf;
	enum vb2_buffer_state state;
	int i = 0;

	if (count <= 0 || !inst->fh.m2m_ctx)
		return;

	while (i < count) {
		src_buf = v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx);
		if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
			return;
		if (vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_DECODED)
			state = VB2_BUF_STATE_DONE;
		else
			state = VB2_BUF_STATE_ERROR;
		i++;
		vpu_set_buffer_state(src_buf, VPU_BUF_STATE_IDLE);
		v4l2_m2m_buf_done(src_buf, state);
	}
}

struct vb2_v4l2_buffer *vpu_find_buf_by_sequence(struct vpu_inst *inst, u32 type, u32 sequence)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return NULL;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->sequence == sequence)
				break;
			vbuf = NULL;
		}
	} else {
		v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->sequence == sequence)
				break;
			vbuf = NULL;
		}
	}

	return vbuf;
}

struct vb2_v4l2_buffer *vpu_find_buf_by_idx(struct vpu_inst *inst, u32 type, u32 idx)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return NULL;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->vb2_buf.index == idx)
				break;
			vbuf = NULL;
		}
	} else {
		v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->vb2_buf.index == idx)
				break;
			vbuf = NULL;
		}
	}

	return vbuf;
}

int vpu_get_num_buffers(struct vpu_inst *inst, u32 type)
{
	struct vb2_queue *q;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (V4L2_TYPE_IS_OUTPUT(type))
		q = v4l2_m2m_get_src_vq(inst->fh.m2m_ctx);
	else
		q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);

	return vb2_get_num_buffers(q);
}

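/*
 * The driver feeds buffers to the firmware directly from buf_queue and its
 * message handlers, so device_run has nothing left to schedule; job_abort
 * only needs to tell the m2m framework the job is finished.
 */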
static void vpu_m2m_device_run(void *priv)
{
}

static void vpu_m2m_job_abort(void *priv)
{
	struct vpu_inst *inst = priv;
	struct v4l2_m2m_ctx *m2m_ctx = inst->fh.m2m_ctx;

	v4l2_m2m_job_finish(m2m_ctx->m2m_dev, m2m_ctx);
}

static const struct v4l2_m2m_ops vpu_m2m_ops = {
	.device_run = vpu_m2m_device_run,
	.job_abort = vpu_m2m_job_abort
};

static int vpu_vb2_queue_setup(struct vb2_queue *vq,
			       unsigned int *buf_count,
			       unsigned int *plane_count,
			       unsigned int psize[],
			       struct device *allocators[])
{
	struct vpu_inst *inst = vb2_get_drv_priv(vq);
	struct vpu_format *cur_fmt;
	int i;

	cur_fmt = vpu_get_format(inst, vq->type);

	if (*plane_count) {
		if (*plane_count != cur_fmt->mem_planes)
			return -EINVAL;
		for (i = 0; i < cur_fmt->mem_planes; i++) {
			if (psize[i] < vpu_get_fmt_plane_size(cur_fmt, i))
				return -EINVAL;
		}
		return 0;
	}

	if (V4L2_TYPE_IS_OUTPUT(vq->type))
		*buf_count = max_t(unsigned int, *buf_count, inst->min_buffer_out);
	else
		*buf_count = max_t(unsigned int, *buf_count, inst->min_buffer_cap);
	*plane_count = cur_fmt->mem_planes;
	for (i = 0; i < cur_fmt->mem_planes; i++)
		psize[i] = vpu_get_fmt_plane_size(cur_fmt, i);

	if (V4L2_TYPE_IS_OUTPUT(vq->type) && inst->state == VPU_CODEC_STATE_SEEK) {
		vpu_trace(inst->dev, "reinit when VIDIOC_REQBUFS(OUTPUT, 0)\n");
		call_void_vop(inst, release);
	}

	return 0;
}

static int vpu_vb2_buf_init(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vpu_set_buffer_state(vbuf, VPU_BUF_STATE_IDLE);
	return 0;
}

static int vpu_vb2_buf_out_validate(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vbuf->field = V4L2_FIELD_NONE;

	return 0;
}

static int vpu_vb2_buf_prepare(struct vb2_buffer *vb)
{
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_format *cur_fmt;
	u32 i;

	cur_fmt = vpu_get_format(inst, vb->type);
	for (i = 0; i < cur_fmt->mem_planes; i++) {
		if (vpu_get_vb_length(vb, i) < vpu_get_fmt_plane_size(cur_fmt, i)) {
			dev_dbg(inst->dev, "[%d] %s buf[%d] is invalid\n",
				inst->id, vpu_type_name(vb->type), vb->index);
			vpu_set_buffer_state(vbuf, VPU_BUF_STATE_ERROR);
		}
	}

	return 0;
}

static void vpu_vb2_buf_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_queue *q = vb->vb2_queue;

	if (V4L2_TYPE_IS_CAPTURE(vb->type)) {
		struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);
		struct v4l2_ctrl *ctrl = v4l2_ctrl_find(&inst->ctrl_handler,
							V4L2_CID_MPEG_VIDEO_AVERAGE_QP);

		if (ctrl)
			v4l2_ctrl_s_ctrl(ctrl, vpu_buf->average_qp);
	}

	if (vbuf->flags & V4L2_BUF_FLAG_LAST)
		vpu_notify_eos(inst);

	if (list_empty(&q->done_list))
		call_void_vop(inst, on_queue_empty, q->type);
}

void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *buf;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		while ((buf = v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx))) {
			vpu_set_buffer_state(buf, VPU_BUF_STATE_IDLE);
			v4l2_m2m_buf_done(buf, state);
		}
	} else {
		while ((buf = v4l2_m2m_dst_buf_remove(inst->fh.m2m_ctx))) {
			vpu_set_buffer_state(buf, VPU_BUF_STATE_IDLE);
			v4l2_m2m_buf_done(buf, state);
		}
	}
}

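/*
 * Note: inst->lock is already held here (it is the vb2 queue lock), so it is
 * dropped around vpu_inst_register(), presumably because registering with
 * the firmware can block and may itself need the lock.
 */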
static int vpu_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct vpu_inst *inst = vb2_get_drv_priv(q);
	struct vpu_format *fmt = vpu_get_format(inst, q->type);
	int ret;

	vpu_inst_unlock(inst);
	ret = vpu_inst_register(inst);
	vpu_inst_lock(inst);
	if (ret) {
		vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_QUEUED);
		return ret;
	}

	vpu_trace(inst->dev, "[%d] %s %c%c%c%c %dx%d %u(%u) %u(%u) %u(%u) %d\n",
		  inst->id, vpu_type_name(q->type),
		  fmt->pixfmt,
		  fmt->pixfmt >> 8,
		  fmt->pixfmt >> 16,
		  fmt->pixfmt >> 24,
		  fmt->width, fmt->height,
		  fmt->sizeimage[0], fmt->bytesperline[0],
		  fmt->sizeimage[1], fmt->bytesperline[1],
		  fmt->sizeimage[2], fmt->bytesperline[2],
		  vb2_get_num_buffers(q));
	vb2_clear_last_buffer_dequeued(q);
	ret = call_vop(inst, start, q->type);
	if (ret)
		vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_QUEUED);

	return ret;
}

static void vpu_vb2_stop_streaming(struct vb2_queue *q)
{
	struct vpu_inst *inst = vb2_get_drv_priv(q);

	vpu_trace(inst->dev, "[%d] %s\n", inst->id, vpu_type_name(q->type));

	call_void_vop(inst, stop, q->type);
	vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_ERROR);
	if (V4L2_TYPE_IS_OUTPUT(q->type))
		inst->sequence = 0;
}

static void vpu_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);

	if (V4L2_TYPE_IS_OUTPUT(vb->type))
		vbuf->sequence = inst->sequence++;

	v4l2_m2m_buf_queue(inst->fh.m2m_ctx, vbuf);
	vpu_process_output_buffer(inst);
	vpu_process_capture_buffer(inst);
}

static const struct vb2_ops vpu_vb2_ops = {
	.queue_setup = vpu_vb2_queue_setup,
	.buf_init = vpu_vb2_buf_init,
	.buf_out_validate = vpu_vb2_buf_out_validate,
	.buf_prepare = vpu_vb2_buf_prepare,
	.buf_finish = vpu_vb2_buf_finish,
	.start_streaming = vpu_vb2_start_streaming,
	.stop_streaming = vpu_vb2_stop_streaming,
	.buf_queue = vpu_vb2_buf_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};

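/*
 * Both queues use vpu_vb2_ops and share inst->lock. The bitstream queue
 * (decoder OUTPUT / encoder CAPTURE) switches to vmalloc memory when a
 * stream buffer is in use, since the bitstream is then copied by the CPU
 * into the VPU's stream ring buffer rather than accessed by DMA directly.
 */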
static int vpu_m2m_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
{
	struct vpu_inst *inst = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	inst->out_format.type = src_vq->type;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->ops = &vpu_vb2_ops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	if (inst->type == VPU_CORE_TYPE_DEC && inst->use_stream_buffer)
		src_vq->mem_ops = &vb2_vmalloc_memops;
	src_vq->drv_priv = inst;
	src_vq->buf_struct_size = sizeof(struct vpu_vb2_buffer);
	src_vq->min_queued_buffers = 1;
	src_vq->dev = inst->vpu->dev;
	src_vq->lock = &inst->lock;
	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	inst->cap_format.type = dst_vq->type;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->ops = &vpu_vb2_ops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	if (inst->type == VPU_CORE_TYPE_ENC && inst->use_stream_buffer)
		dst_vq->mem_ops = &vb2_vmalloc_memops;
	dst_vq->drv_priv = inst;
	dst_vq->buf_struct_size = sizeof(struct vpu_vb2_buffer);
	dst_vq->min_queued_buffers = 1;
	dst_vq->dev = inst->vpu->dev;
	dst_vq->lock = &inst->lock;
	ret = vb2_queue_init(dst_vq);
	if (ret) {
		vb2_queue_release(src_vq);
		return ret;
	}

	return 0;
}

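/*
 * Final teardown of an instance; invoked through inst->release (set in
 * vpu_v4l2_open()) once the last reference taken with vpu_inst_get() is
 * dropped.
 */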
static int vpu_v4l2_release(struct vpu_inst *inst)
{
	vpu_trace(inst->vpu->dev, "%p\n", inst);

	vpu_release_core(inst->core);
	put_device(inst->dev);

	if (inst->workqueue) {
		cancel_work_sync(&inst->msg_work);
		destroy_workqueue(inst->workqueue);
		inst->workqueue = NULL;
	}

	v4l2_ctrl_handler_free(&inst->ctrl_handler);
	mutex_destroy(&inst->lock);
	v4l2_fh_del(&inst->fh);
	v4l2_fh_exit(&inst->fh);

	call_void_vop(inst, cleanup);

	return 0;
}

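/*
 * Common open path shared by the encoder and decoder: takes the first
 * instance reference, binds the instance to a core, and sets up the file
 * handle, m2m context and per-instance message workqueue.
 */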
int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
{
	struct vpu_dev *vpu = video_drvdata(file);
	struct vpu_func *func;
	int ret = 0;

	if (!inst || !inst->ops)
		return -EINVAL;

	if (inst->type == VPU_CORE_TYPE_ENC)
		func = &vpu->encoder;
	else
		func = &vpu->decoder;

	atomic_set(&inst->ref_count, 0);
	atomic_long_set(&inst->last_response_cmd, 0);
	vpu_inst_get(inst);
	inst->vpu = vpu;
	inst->core = vpu_request_core(vpu, inst->type);
	if (inst->core)
		inst->dev = get_device(inst->core->dev);
	mutex_init(&inst->lock);
	INIT_LIST_HEAD(&inst->cmd_q);
	inst->id = VPU_INST_NULL_ID;
	inst->release = vpu_v4l2_release;
	inst->pid = current->pid;
	inst->tgid = current->tgid;
	inst->min_buffer_cap = 2;
	inst->min_buffer_out = 2;
	v4l2_fh_init(&inst->fh, func->vfd);
	v4l2_fh_add(&inst->fh);

	ret = call_vop(inst, ctrl_init);
	if (ret)
		goto error;

	inst->fh.m2m_ctx = v4l2_m2m_ctx_init(func->m2m_dev, inst, vpu_m2m_queue_init);
	if (IS_ERR(inst->fh.m2m_ctx)) {
		dev_err(vpu->dev, "v4l2_m2m_ctx_init fail\n");
		ret = PTR_ERR(inst->fh.m2m_ctx);
		goto error;
	}

	inst->fh.ctrl_handler = &inst->ctrl_handler;
	file->private_data = &inst->fh;
	inst->state = VPU_CODEC_STATE_DEINIT;
	inst->workqueue = alloc_ordered_workqueue("vpu_inst", WQ_MEM_RECLAIM);
	if (inst->workqueue) {
		INIT_WORK(&inst->msg_work, vpu_inst_run_work);
		ret = kfifo_init(&inst->msg_fifo,
				 inst->msg_buffer,
				 rounddown_pow_of_two(sizeof(inst->msg_buffer)));
		if (ret) {
			destroy_workqueue(inst->workqueue);
			inst->workqueue = NULL;
		}
	}
	vpu_trace(vpu->dev, "tgid = %d, pid = %d, type = %s, inst = %p\n",
		  inst->tgid, inst->pid, vpu_core_type_desc(inst->type), inst);

	return 0;
error:
	vpu_inst_put(inst);
	return ret;
}

int vpu_v4l2_close(struct file *file)
{
	struct vpu_dev *vpu = video_drvdata(file);
	struct vpu_inst *inst = to_inst(file);

	vpu_trace(vpu->dev, "tgid = %d, pid = %d, inst = %p\n", inst->tgid, inst->pid, inst);

	vpu_inst_lock(inst);
	if (inst->fh.m2m_ctx) {
		v4l2_m2m_ctx_release(inst->fh.m2m_ctx);
		inst->fh.m2m_ctx = NULL;
	}
	call_void_vop(inst, release);
	vpu_inst_unlock(inst);

	vpu_inst_unregister(inst);
	vpu_inst_put(inst);

	return 0;
}

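/*
 * Create the video device and m2m device for one function (encoder or
 * decoder) and register them with the V4L2 and media controller frameworks.
 */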
int vpu_add_func(struct vpu_dev *vpu, struct vpu_func *func)
{
	struct video_device *vfd;
	int ret;

	if (!vpu || !func)
		return -EINVAL;

	if (func->vfd)
		return 0;

	func->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
	if (IS_ERR(func->m2m_dev)) {
		dev_err(vpu->dev, "v4l2_m2m_init fail\n");
		func->vfd = NULL;
		return PTR_ERR(func->m2m_dev);
	}

	vfd = video_device_alloc();
	if (!vfd) {
		v4l2_m2m_release(func->m2m_dev);
		dev_err(vpu->dev, "alloc vpu video device fail\n");
		return -ENOMEM;
	}
	vfd->release = video_device_release;
	vfd->vfl_dir = VFL_DIR_M2M;
	vfd->v4l2_dev = &vpu->v4l2_dev;
	vfd->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
	if (func->type == VPU_CORE_TYPE_ENC) {
		strscpy(vfd->name, "amphion-vpu-encoder", sizeof(vfd->name));
		vfd->fops = venc_get_fops();
		vfd->ioctl_ops = venc_get_ioctl_ops();
	} else {
		strscpy(vfd->name, "amphion-vpu-decoder", sizeof(vfd->name));
		vfd->fops = vdec_get_fops();
		vfd->ioctl_ops = vdec_get_ioctl_ops();
	}

	ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
	if (ret) {
		video_device_release(vfd);
		v4l2_m2m_release(func->m2m_dev);
		return ret;
	}
	video_set_drvdata(vfd, vpu);
	func->vfd = vfd;

	ret = v4l2_m2m_register_media_controller(func->m2m_dev, func->vfd, func->function);
	if (ret) {
		v4l2_m2m_release(func->m2m_dev);
		func->m2m_dev = NULL;
		video_unregister_device(func->vfd);
		func->vfd = NULL;
		return ret;
	}

	return 0;
}

void vpu_remove_func(struct vpu_func *func)
{
	if (!func)
		return;

	if (func->m2m_dev) {
		v4l2_m2m_unregister_media_controller(func->m2m_dev);
		v4l2_m2m_release(func->m2m_dev);
		func->m2m_dev = NULL;
	}
	if (func->vfd) {
		video_unregister_device(func->vfd);
		func->vfd = NULL;
	}
}