// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for STM32 Digital Camera Memory Interface Pixel Processor
 *
 * Copyright (C) STMicroelectronics SA 2023
 *
 * for STMicroelectronics.
 */
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>

#include "dcmipp-common.h"
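
/* DCMIPP register offsets and bit definitions used by the pipe0 byte capture device */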
#define DCMIPP_PRSR		0x1f8
#define DCMIPP_CMIER		0x3f0
#define DCMIPP_CMIER_P0FRAMEIE	BIT(9)
#define DCMIPP_CMIER_P0VSYNCIE	BIT(10)
#define DCMIPP_CMIER_P0OVRIE	BIT(15)
#define DCMIPP_CMIER_P0ALL	(DCMIPP_CMIER_P0VSYNCIE |\
				 DCMIPP_CMIER_P0FRAMEIE |\
				 DCMIPP_CMIER_P0OVRIE)
#define DCMIPP_CMSR1		0x3f4
#define DCMIPP_CMSR2		0x3f8
#define DCMIPP_CMSR2_P0FRAMEF	BIT(9)
#define DCMIPP_CMSR2_P0VSYNCF	BIT(10)
#define DCMIPP_CMSR2_P0OVRF	BIT(15)
#define DCMIPP_CMFCR		0x3fc
#define DCMIPP_P0FSCR		0x404
#define DCMIPP_P0FSCR_PIPEN	BIT(31)
#define DCMIPP_P0FCTCR		0x500
#define DCMIPP_P0FCTCR_CPTREQ	BIT(3)
#define DCMIPP_P0DCCNTR		0x5b0
#define DCMIPP_P0DCLMTR		0x5b4
#define DCMIPP_P0DCLMTR_ENABLE	BIT(31)
#define DCMIPP_P0DCLMTR_LIMIT_MASK	GENMASK(23, 0)
#define DCMIPP_P0PPM0AR1	0x5c4
#define DCMIPP_P0SR		0x5f8
#define DCMIPP_P0SR_CPTACT	BIT(23)

struct dcmipp_bytecap_pix_map {
	unsigned int code;
	u32 pixelformat;
};

#define PIXMAP_MBUS_PFMT(mbus, fmt)		\
	{					\
		.code = MEDIA_BUS_FMT_##mbus,	\
		.pixelformat = V4L2_PIX_FMT_##fmt	\
	}
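
/* Supported media bus code <-> V4L2 pixel format translations */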
static const struct dcmipp_bytecap_pix_map dcmipp_bytecap_pix_map_list[] = {
	PIXMAP_MBUS_PFMT(RGB565_2X8_LE, RGB565),
	PIXMAP_MBUS_PFMT(RGB565_1X16, RGB565),
	PIXMAP_MBUS_PFMT(YUYV8_2X8, YUYV),
	PIXMAP_MBUS_PFMT(YUYV8_1X16, YUYV),
	PIXMAP_MBUS_PFMT(YVYU8_2X8, YVYU),
	PIXMAP_MBUS_PFMT(YVYU8_1X16, YVYU),
	PIXMAP_MBUS_PFMT(UYVY8_2X8, UYVY),
	PIXMAP_MBUS_PFMT(UYVY8_1X16, UYVY),
	PIXMAP_MBUS_PFMT(VYUY8_2X8, VYUY),
	PIXMAP_MBUS_PFMT(VYUY8_1X16, VYUY),
	PIXMAP_MBUS_PFMT(Y8_1X8, GREY),
	PIXMAP_MBUS_PFMT(SBGGR8_1X8, SBGGR8),
	PIXMAP_MBUS_PFMT(SGBRG8_1X8, SGBRG8),
	PIXMAP_MBUS_PFMT(SGRBG8_1X8, SGRBG8),
	PIXMAP_MBUS_PFMT(SRGGB8_1X8, SRGGB8),
	PIXMAP_MBUS_PFMT(SBGGR10_1X10, SBGGR10),
	PIXMAP_MBUS_PFMT(SGBRG10_1X10, SGBRG10),
	PIXMAP_MBUS_PFMT(SGRBG10_1X10, SGRBG10),
	PIXMAP_MBUS_PFMT(SRGGB10_1X10, SRGGB10),
	PIXMAP_MBUS_PFMT(SBGGR12_1X12, SBGGR12),
	PIXMAP_MBUS_PFMT(SGBRG12_1X12, SGBRG12),
	PIXMAP_MBUS_PFMT(SGRBG12_1X12, SGRBG12),
	PIXMAP_MBUS_PFMT(SRGGB12_1X12, SRGGB12),
	PIXMAP_MBUS_PFMT(SBGGR14_1X14, SBGGR14),
	PIXMAP_MBUS_PFMT(SGBRG14_1X14, SGBRG14),
	PIXMAP_MBUS_PFMT(SGRBG14_1X14, SGRBG14),
	PIXMAP_MBUS_PFMT(SRGGB14_1X14, SRGGB14),
	PIXMAP_MBUS_PFMT(JPEG_1X8, JPEG),
};

static const struct dcmipp_bytecap_pix_map *
dcmipp_bytecap_pix_map_by_pixelformat(u32 pixelformat)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(dcmipp_bytecap_pix_map_list); i++) {
		if (dcmipp_bytecap_pix_map_list[i].pixelformat == pixelformat)
			return &dcmipp_bytecap_pix_map_list[i];
	}

	return NULL;
}

struct dcmipp_buf {
	struct vb2_v4l2_buffer	vb;
	bool			prepared;
	dma_addr_t		addr;
	size_t			size;
	struct list_head	list;
};

enum dcmipp_state {
	DCMIPP_STOPPED,
	DCMIPP_WAIT_FOR_BUFFER,
	DCMIPP_RUNNING,
};

struct dcmipp_bytecap_device {
	struct dcmipp_ent_device ved;
	struct video_device vdev;
	struct device *dev;
	struct v4l2_pix_format format;
	struct vb2_queue queue;
	struct list_head buffers;
	/*
	 * Protects concurrent calls of buf queue / irq handler
	 * and buffer handling related variables / lists
	 */
	spinlock_t irqlock;
	/* mutex used as vdev and queue lock */
	struct mutex lock;
	u32 sequence;
	struct media_pipeline pipe;
	struct v4l2_subdev *s_subdev;
	u32 s_subdev_pad_nb;

	enum dcmipp_state state;

	/*
	 * The DCMIPP driver handles 2 buffers:
	 * active: buffer the DCMIPP is currently writing into
	 * next: buffer given to the DCMIPP and which will become
	 * automatically active on next VSYNC
	 */
	struct dcmipp_buf *active, *next;

	void __iomem *regs;

	u32 cmier;
	u32 cmsr2;

	struct {
		u32 errors;
		u32 limit;
		u32 overrun;
		u32 buffers;
		u32 underrun;
		u32 nactive;
	} count;
};
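
/* Default capture format, used until userspace configures another one */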
static const struct v4l2_pix_format fmt_default = {
	.width = DCMIPP_FMT_WIDTH_DEFAULT,
	.height = DCMIPP_FMT_HEIGHT_DEFAULT,
	.pixelformat = V4L2_PIX_FMT_RGB565,
	.field = V4L2_FIELD_NONE,
	.bytesperline = DCMIPP_FMT_WIDTH_DEFAULT * 2,
	.sizeimage = DCMIPP_FMT_WIDTH_DEFAULT * DCMIPP_FMT_HEIGHT_DEFAULT * 2,
	.colorspace = DCMIPP_COLORSPACE_DEFAULT,
	.ycbcr_enc = DCMIPP_YCBCR_ENC_DEFAULT,
	.quantization = DCMIPP_QUANTIZATION_DEFAULT,
	.xfer_func = DCMIPP_XFER_FUNC_DEFAULT,
};

static int dcmipp_bytecap_querycap(struct file *file, void *priv,
				   struct v4l2_capability *cap)
{
	strscpy(cap->driver, DCMIPP_PDEV_NAME, sizeof(cap->driver));
	strscpy(cap->card, KBUILD_MODNAME, sizeof(cap->card));

	return 0;
}

static int dcmipp_bytecap_g_fmt_vid_cap(struct file *file, void *priv,
					struct v4l2_format *f)
{
	struct dcmipp_bytecap_device *vcap = video_drvdata(file);

	f->fmt.pix = vcap->format;

	return 0;
}

static int dcmipp_bytecap_try_fmt_vid_cap(struct file *file, void *priv,
					  struct v4l2_format *f)
{
	struct dcmipp_bytecap_device *vcap = video_drvdata(file);
	struct v4l2_pix_format *format = &f->fmt.pix;
	const struct dcmipp_bytecap_pix_map *vpix;
	u32 in_w, in_h;

	/* Don't accept a pixelformat that is not in the table */
	vpix = dcmipp_bytecap_pix_map_by_pixelformat(format->pixelformat);
	if (!vpix)
		format->pixelformat = fmt_default.pixelformat;

	/* Adjust width & height */
	in_w = format->width;
	in_h = format->height;
	v4l_bound_align_image(&format->width, DCMIPP_FRAME_MIN_WIDTH,
			      DCMIPP_FRAME_MAX_WIDTH, 0, &format->height,
			      DCMIPP_FRAME_MIN_HEIGHT, DCMIPP_FRAME_MAX_HEIGHT,
			      0, 0);
	if (format->width != in_w || format->height != in_h)
		dev_dbg(vcap->dev, "resolution updated: %dx%d -> %dx%d\n",
			in_w, in_h, format->width, format->height);

	if (format->pixelformat == V4L2_PIX_FMT_JPEG) {
		format->bytesperline = format->width;
		format->sizeimage = format->bytesperline * format->height;
	} else {
		v4l2_fill_pixfmt(format, format->pixelformat,
				 format->width, format->height);
	}

	if (format->field == V4L2_FIELD_ANY)
		format->field = fmt_default.field;

	dcmipp_colorimetry_clamp(format);

	return 0;
}

static int dcmipp_bytecap_s_fmt_vid_cap(struct file *file, void *priv,
					struct v4l2_format *f)
{
	struct dcmipp_bytecap_device *vcap = video_drvdata(file);
	int ret;

	/* Do not change the format while the stream is on */
	if (vb2_is_busy(&vcap->queue))
		return -EBUSY;

	ret = dcmipp_bytecap_try_fmt_vid_cap(file, priv, f);
	if (ret)
		return ret;

	dev_dbg(vcap->dev, "%s: format update: old:%ux%u (0x%p4cc, %u, %u, %u, %u) new:%ux%u (0x%p4cc, %u, %u, %u, %u)\n",
		__func__,
		/* old */
		vcap->format.width, vcap->format.height,
		&vcap->format.pixelformat, vcap->format.colorspace,
		vcap->format.quantization, vcap->format.xfer_func,
		vcap->format.ycbcr_enc,
		/* new */
		f->fmt.pix.width, f->fmt.pix.height,
		&f->fmt.pix.pixelformat, f->fmt.pix.colorspace,
		f->fmt.pix.quantization, f->fmt.pix.xfer_func,
		f->fmt.pix.ycbcr_enc);

	vcap->format = f->fmt.pix;

	return 0;
}

static int dcmipp_bytecap_enum_fmt_vid_cap(struct file *file, void *priv,
					   struct v4l2_fmtdesc *f)
{
	const struct dcmipp_bytecap_pix_map *vpix;
	unsigned int index = f->index;
	unsigned int i, prev_pixelformat = 0;

	/*
	 * List all formats (or only the ones matching f->mbus_code), taking
	 * care of removing duplicated entries (due to support of both
	 * parallel & csi 16 bits formats)
	 */
	for (i = 0; i < ARRAY_SIZE(dcmipp_bytecap_pix_map_list); i++) {
		vpix = &dcmipp_bytecap_pix_map_list[i];
		/* Skip formats not matching the requested mbus code */
		if (f->mbus_code && vpix->code != f->mbus_code)
			continue;

		/* Skip duplicated pixelformat */
		if (vpix->pixelformat == prev_pixelformat)
			continue;
		prev_pixelformat = vpix->pixelformat;

		if (!index)
			break;
		index--;
	}

	if (i == ARRAY_SIZE(dcmipp_bytecap_pix_map_list))
		return -EINVAL;

	f->pixelformat = vpix->pixelformat;

	return 0;
}

static int dcmipp_bytecap_enum_framesizes(struct file *file, void *fh,
					  struct v4l2_frmsizeenum *fsize)
{
	const struct dcmipp_bytecap_pix_map *vpix;

	if (fsize->index)
		return -EINVAL;

	/* Only accept codes that are in the pix map table */
	vpix = dcmipp_bytecap_pix_map_by_pixelformat(fsize->pixel_format);
	if (!vpix)
		return -EINVAL;

	fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
	fsize->stepwise.min_width = DCMIPP_FRAME_MIN_WIDTH;
	fsize->stepwise.max_width = DCMIPP_FRAME_MAX_WIDTH;
	fsize->stepwise.min_height = DCMIPP_FRAME_MIN_HEIGHT;
	fsize->stepwise.max_height = DCMIPP_FRAME_MAX_HEIGHT;
	fsize->stepwise.step_width = 1;
	fsize->stepwise.step_height = 1;

	return 0;
}

static const struct v4l2_file_operations dcmipp_bytecap_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.read		= vb2_fop_read,
	.poll		= vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap		= vb2_fop_mmap,
};

static const struct v4l2_ioctl_ops dcmipp_bytecap_ioctl_ops = {
	.vidioc_querycap = dcmipp_bytecap_querycap,

	.vidioc_g_fmt_vid_cap = dcmipp_bytecap_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap = dcmipp_bytecap_s_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap = dcmipp_bytecap_try_fmt_vid_cap,
	.vidioc_enum_fmt_vid_cap = dcmipp_bytecap_enum_fmt_vid_cap,
	.vidioc_enum_framesizes = dcmipp_bytecap_enum_framesizes,

	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
};

static void dcmipp_start_capture(struct dcmipp_bytecap_device *vcap,
				 struct dcmipp_buf *buf)
{
	/* Set buffer address */
	reg_write(vcap, DCMIPP_P0PPM0AR1, buf->addr);

	/* Set buffer size */
	reg_write(vcap, DCMIPP_P0DCLMTR, DCMIPP_P0DCLMTR_ENABLE |
		  ((buf->size / 4) & DCMIPP_P0DCLMTR_LIMIT_MASK));
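	/* The limit is programmed in 32-bit words, hence the size / 4 above */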

	/* Capture request */
	reg_set(vcap, DCMIPP_P0FCTCR, DCMIPP_P0FCTCR_CPTREQ);
}
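
/* Return all queued buffers to vb2 with the given state, irqlock must be held */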
static void dcmipp_bytecap_all_buffers_done(struct dcmipp_bytecap_device *vcap,
					    enum vb2_buffer_state state)
{
	struct dcmipp_buf *buf, *node;

	list_for_each_entry_safe(buf, node, &vcap->buffers, list) {
		list_del_init(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
}

static int dcmipp_bytecap_start_streaming(struct vb2_queue *vq,
					  unsigned int count)
{
	struct dcmipp_bytecap_device *vcap = vb2_get_drv_priv(vq);
	struct media_entity *entity = &vcap->vdev.entity;
	struct dcmipp_buf *buf;
	struct media_pad *pad;
	int ret;

	vcap->sequence = 0;
	memset(&vcap->count, 0, sizeof(vcap->count));

	/*
	 * Get source subdev - since link is IMMUTABLE, pointer is cached
	 * within the dcmipp_bytecap_device structure
	 */
	if (!vcap->s_subdev) {
		pad = media_pad_remote_pad_first(&vcap->vdev.entity.pads[0]);
		if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
			return -EINVAL;
		vcap->s_subdev = media_entity_to_v4l2_subdev(pad->entity);
		vcap->s_subdev_pad_nb = pad->index;
	}

	ret = pm_runtime_resume_and_get(vcap->dev);
	if (ret < 0) {
		dev_err(vcap->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
			__func__, ret);
		goto err_buffer_done;
	}

	ret = media_pipeline_start(entity->pads, &vcap->pipe);
	if (ret) {
		dev_dbg(vcap->dev, "%s: Failed to start streaming, media pipeline start error (%d)\n",
			__func__, ret);
		goto err_pm_put;
	}

	ret = v4l2_subdev_enable_streams(vcap->s_subdev,
					 vcap->s_subdev_pad_nb, BIT_ULL(0));
	if (ret)
		goto err_media_pipeline_stop;

	spin_lock_irq(&vcap->irqlock);

	/* Enable pipe at the end of programming */
	reg_set(vcap, DCMIPP_P0FSCR, DCMIPP_P0FSCR_PIPEN);

	/*
	 * The vb2 framework guarantees that we have at least
	 * 'min_queued_buffers' buffers in the list at this point
	 */
	vcap->next = list_first_entry(&vcap->buffers, typeof(*buf), list);
	dev_dbg(vcap->dev, "Start with next [%d] %p phy=%pad\n",
		vcap->next->vb.vb2_buf.index, vcap->next, &vcap->next->addr);

	dcmipp_start_capture(vcap, vcap->next);

	/* Enable interrupts */
	vcap->cmier |= DCMIPP_CMIER_P0ALL;
	reg_set(vcap, DCMIPP_CMIER, vcap->cmier);

	vcap->state = DCMIPP_RUNNING;

	spin_unlock_irq(&vcap->irqlock);

	return 0;

err_media_pipeline_stop:
	media_pipeline_stop(entity->pads);
err_pm_put:
	pm_runtime_put(vcap->dev);
err_buffer_done:
	spin_lock_irq(&vcap->irqlock);
	/*
	 * Return all buffers to vb2 in QUEUED state.
	 * This gives ownership back to userspace.
	 */
	dcmipp_bytecap_all_buffers_done(vcap, VB2_BUF_STATE_QUEUED);
	spin_unlock_irq(&vcap->irqlock);

	return ret;
}
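
/* Dump the main DCMIPP status registers, for debug purposes */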
static void dcmipp_dump_status(struct dcmipp_bytecap_device *vcap)
{
	struct device *dev = vcap->dev;

	dev_dbg(dev, "[DCMIPP_PRSR] =%#10.8x\n", reg_read(vcap, DCMIPP_PRSR));
	dev_dbg(dev, "[DCMIPP_P0SR] =%#10.8x\n", reg_read(vcap, DCMIPP_P0SR));
	dev_dbg(dev, "[DCMIPP_P0DCCNTR]=%#10.8x\n",
		reg_read(vcap, DCMIPP_P0DCCNTR));
	dev_dbg(dev, "[DCMIPP_CMSR1] =%#10.8x\n", reg_read(vcap, DCMIPP_CMSR1));
	dev_dbg(dev, "[DCMIPP_CMSR2] =%#10.8x\n", reg_read(vcap, DCMIPP_CMSR2));
}

/*
 * Stop the stream engine. Any remaining buffers in the stream queue are
 * dequeued and passed on to the vb2 framework marked as STATE_ERROR.
 */
static void dcmipp_bytecap_stop_streaming(struct vb2_queue *vq)
{
	struct dcmipp_bytecap_device *vcap = vb2_get_drv_priv(vq);
	int ret;
	u32 status;

	ret = v4l2_subdev_disable_streams(vcap->s_subdev,
					  vcap->s_subdev_pad_nb, BIT_ULL(0));
	if (ret)
		dev_warn(vcap->dev, "Failed to disable stream\n");

	/* Stop the media pipeline */
	media_pipeline_stop(vcap->vdev.entity.pads);

	/* Disable interrupts */
	reg_clear(vcap, DCMIPP_CMIER, vcap->cmier);

	/* Stop capture */
	reg_clear(vcap, DCMIPP_P0FCTCR, DCMIPP_P0FCTCR_CPTREQ);

	/* Wait until CPTACT becomes 0 */
	ret = readl_relaxed_poll_timeout(vcap->regs + DCMIPP_P0SR, status,
					 !(status & DCMIPP_P0SR_CPTACT),
					 10 * USEC_PER_MSEC,
					 1000 * USEC_PER_MSEC);
	if (ret)
		dev_warn(vcap->dev, "Timeout when stopping\n");

	/* Disable the pipe */
	reg_clear(vcap, DCMIPP_P0FSCR, DCMIPP_P0FSCR_PIPEN);

	spin_lock_irq(&vcap->irqlock);

	/* Return all queued buffers to vb2 in ERROR state */
	dcmipp_bytecap_all_buffers_done(vcap, VB2_BUF_STATE_ERROR);
	INIT_LIST_HEAD(&vcap->buffers);

	vcap->active = NULL;
	vcap->state = DCMIPP_STOPPED;

	spin_unlock_irq(&vcap->irqlock);

	dcmipp_dump_status(vcap);

	pm_runtime_put(vcap->dev);

	if (vcap->count.errors)
		dev_warn(vcap->dev, "Some errors found while streaming: errors=%d (overrun=%d, limit=%d, nactive=%d), underrun=%d, buffers=%d\n",
			 vcap->count.errors, vcap->count.overrun,
			 vcap->count.limit, vcap->count.nactive,
			 vcap->count.underrun, vcap->count.buffers);
}

static int dcmipp_bytecap_buf_prepare(struct vb2_buffer *vb)
{
	struct dcmipp_bytecap_device *vcap = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct dcmipp_buf *buf = container_of(vbuf, struct dcmipp_buf, vb);
	unsigned long size;

	size = vcap->format.sizeimage;

	if (vb2_plane_size(vb, 0) < size) {
		dev_err(vcap->dev, "%s data will not fit into plane (%lu < %lu)\n",
			__func__, vb2_plane_size(vb, 0), size);
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, size);

	if (!buf->prepared) {
		/* Get memory addresses */
		buf->addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
		buf->size = vb2_plane_size(&buf->vb.vb2_buf, 0);
		buf->prepared = true;

		vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);

		dev_dbg(vcap->dev, "Setup [%d] phy=%pad size=%zu\n",
			vb->index, &buf->addr, buf->size);
	}

	return 0;
}

static void dcmipp_bytecap_buf_queue(struct vb2_buffer *vb2_buf)
{
	struct dcmipp_bytecap_device *vcap =
			vb2_get_drv_priv(vb2_buf->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2_buf);
	struct dcmipp_buf *buf = container_of(vbuf, struct dcmipp_buf, vb);

	dev_dbg(vcap->dev, "Queue [%d] %p phy=%pad\n", buf->vb.vb2_buf.index,
		buf, &buf->addr);

	spin_lock_irq(&vcap->irqlock);
	list_add_tail(&buf->list, &vcap->buffers);

	if (vcap->state == DCMIPP_WAIT_FOR_BUFFER) {
		vcap->next = buf;
		dev_dbg(vcap->dev, "Restart with next [%d] %p phy=%pad\n",
			buf->vb.vb2_buf.index, buf, &buf->addr);

		dcmipp_start_capture(vcap, buf);

		vcap->state = DCMIPP_RUNNING;
	}

	spin_unlock_irq(&vcap->irqlock);
}

static int dcmipp_bytecap_queue_setup(struct vb2_queue *vq,
				      unsigned int *nbuffers,
				      unsigned int *nplanes,
				      unsigned int sizes[],
				      struct device *alloc_devs[])
{
	struct dcmipp_bytecap_device *vcap = vb2_get_drv_priv(vq);
	unsigned int size;

	size = vcap->format.sizeimage;

	/* Make sure the image size is large enough */
	if (*nplanes)
		return sizes[0] < vcap->format.sizeimage ? -EINVAL : 0;

	*nplanes = 1;
	sizes[0] = vcap->format.sizeimage;

	dev_dbg(vcap->dev, "Setup queue, count=%d, size=%d\n",
		*nbuffers, size);

	return 0;
}

static int dcmipp_bytecap_buf_init(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct dcmipp_buf *buf = container_of(vbuf, struct dcmipp_buf, vb);

	INIT_LIST_HEAD(&buf->list);

	return 0;
}

static const struct vb2_ops dcmipp_bytecap_qops = {
	.start_streaming	= dcmipp_bytecap_start_streaming,
	.stop_streaming		= dcmipp_bytecap_stop_streaming,
	.buf_init		= dcmipp_bytecap_buf_init,
	.buf_prepare		= dcmipp_bytecap_buf_prepare,
	.buf_queue		= dcmipp_bytecap_buf_queue,
	.queue_setup		= dcmipp_bytecap_queue_setup,
};

static void dcmipp_bytecap_release(struct video_device *vdev)
{
	struct dcmipp_bytecap_device *vcap =
		container_of(vdev, struct dcmipp_bytecap_device, vdev);

	dcmipp_pads_cleanup(vcap->ved.pads);
	mutex_destroy(&vcap->lock);

	kfree(vcap);
}

void dcmipp_bytecap_ent_release(struct dcmipp_ent_device *ved)
{
	struct dcmipp_bytecap_device *vcap =
		container_of(ved, struct dcmipp_bytecap_device, ved);

	media_entity_cleanup(ved->ent);
	vb2_video_unregister_device(&vcap->vdev);
}
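
/* Hand a completed buffer back to vb2, filling in its metadata; irqlock must be held */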
static void dcmipp_buffer_done(struct dcmipp_bytecap_device *vcap,
			       struct dcmipp_buf *buf,
			       size_t bytesused,
			       int err)
{
	struct vb2_v4l2_buffer *vbuf;

	list_del_init(&buf->list);

	vbuf = &buf->vb;

	vbuf->sequence = vcap->sequence++;
	vbuf->field = V4L2_FIELD_NONE;
	vbuf->vb2_buf.timestamp = ktime_get_ns();
	vb2_set_plane_payload(&vbuf->vb2_buf, 0, bytesused);
	vb2_buffer_done(&vbuf->vb2_buf,
			err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
	dev_dbg(vcap->dev, "Done [%d] %p phy=%pad\n", buf->vb.vb2_buf.index,
		buf, &buf->addr);
	vcap->count.buffers++;
}

/* irqlock must be held */
static void
dcmipp_bytecap_set_next_frame_or_stop(struct dcmipp_bytecap_device *vcap)
{
	if (!vcap->next && list_is_singular(&vcap->buffers)) {
		/*
		 * If there is no available buffer (none or a single one in the
		 * list while two are expected), stop the capture (effective
		 * for the next frame). The on-going frame capture will
		 * continue until FRAME END but no further capture will be done.
		 */
		reg_clear(vcap, DCMIPP_P0FCTCR, DCMIPP_P0FCTCR_CPTREQ);

		dev_dbg(vcap->dev, "Capture restart is deferred to next buffer queueing\n");
		vcap->state = DCMIPP_WAIT_FOR_BUFFER;
		return;
	}

	/* If we don't have a next buffer yet, pick the one after active */
	if (!vcap->next)
		vcap->next = list_next_entry(vcap->active, list);

	/*
	 * This register is shadowed and will be taken into
	 * account on the next VSYNC (start of next frame)
	 */
	reg_write(vcap, DCMIPP_P0PPM0AR1, vcap->next->addr);
	dev_dbg(vcap->dev, "Write [%d] %p phy=%pad\n",
		vcap->next->vb.vb2_buf.index, vcap->next, &vcap->next->addr);
}

/* irqlock must be held */
static void dcmipp_bytecap_process_frame(struct dcmipp_bytecap_device *vcap,
					 size_t bytesused)
{
	int err = 0;
	struct dcmipp_buf *buf = vcap->active;

	if (!buf) {
		vcap->count.nactive++;
		vcap->count.errors++;
		return;
	}

	if (bytesused > buf->size) {
		dev_dbg(vcap->dev, "frame larger than expected (%zu > %zu)\n",
			bytesused, buf->size);
		/* Clip to buffer size and return buffer to V4L2 in error */
		bytesused = buf->size;
		err = -EOVERFLOW;
		vcap->count.errors++;
	}

	vcap->active = NULL;

	dcmipp_buffer_done(vcap, buf, bytesused, err);
}
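
/*
 * Threaded part of the interrupt handler: processes overrun, frame-end and
 * VSYNC events and performs the buffer handling under irqlock.
 */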
static irqreturn_t dcmipp_bytecap_irq_thread(int irq, void *arg)
{
	struct dcmipp_bytecap_device *vcap =
			container_of(arg, struct dcmipp_bytecap_device, ved);
	size_t bytesused = 0;
	u32 cmsr2;

	spin_lock_irq(&vcap->irqlock);

	cmsr2 = vcap->cmsr2 & vcap->cmier;

	/*
	 * If we have an overrun, a frame-end will probably not be generated,
	 * in that case the active buffer will be recycled as next buffer by
	 * the VSYNC handling below
	 */
	if (cmsr2 & DCMIPP_CMSR2_P0OVRF) {
		vcap->count.errors++;
		vcap->count.overrun++;
	}

	if (cmsr2 & DCMIPP_CMSR2_P0FRAMEF) {
		/* Read captured buffer size */
		bytesused = reg_read(vcap, DCMIPP_P0DCCNTR);
		dcmipp_bytecap_process_frame(vcap, bytesused);
	}

	if (cmsr2 & DCMIPP_CMSR2_P0VSYNCF) {
		if (vcap->state == DCMIPP_WAIT_FOR_BUFFER) {
			vcap->count.underrun++;
			goto out;
		}

		/*
		 * On VSYNC, the previously set next buffer is going to become
		 * active thanks to the shadowing mechanism of the DCMIPP. In
		 * most of the cases, since a FRAMEEND has already come,
		 * pointer next is NULL since active is reset during the
		 * FRAMEEND handling. However, in case of framerate adjustment,
		 * there are more VSYNC than FRAMEEND. Thus we recycle the
		 * active (but not used) buffer and put it back into next.
		 */
		swap(vcap->active, vcap->next);
		dcmipp_bytecap_set_next_frame_or_stop(vcap);
	}

out:
	spin_unlock_irq(&vcap->irqlock);

	return IRQ_HANDLED;
}
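
/*
 * Hard IRQ handler: snapshot and clear the interrupt status, then wake up the
 * threaded handler which does the actual buffer handling.
 */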
static irqreturn_t dcmipp_bytecap_irq_callback(int irq, void *arg)
{
	struct dcmipp_bytecap_device *vcap =
			container_of(arg, struct dcmipp_bytecap_device, ved);

	/* Store the interrupt status register */
	vcap->cmsr2 = reg_read(vcap, DCMIPP_CMSR2) & vcap->cmier;

	/* Clear interrupt */
	reg_write(vcap, DCMIPP_CMFCR, vcap->cmsr2);

	return IRQ_WAKE_THREAD;
}
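
/*
 * Validate that the source subdev active format matches the format currently
 * set on the capture video device.
 */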
static int dcmipp_bytecap_link_validate(struct media_link *link)
{
	struct media_entity *entity = link->sink->entity;
	struct video_device *vd = media_entity_to_video_device(entity);
	struct dcmipp_bytecap_device *vcap = container_of(vd,
					struct dcmipp_bytecap_device, vdev);
	struct v4l2_subdev *source_sd =
		media_entity_to_v4l2_subdev(link->source->entity);
	struct v4l2_subdev_format source_fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = link->source->index,
	};
	int ret, i;

	ret = v4l2_subdev_call(source_sd, pad, get_fmt, NULL, &source_fmt);
	if (ret < 0)
		return 0;

	if (source_fmt.format.width != vcap->format.width ||
	    source_fmt.format.height != vcap->format.height) {
		dev_err(vcap->dev, "Wrong width or height %ux%u (%ux%u expected)\n",
			vcap->format.width, vcap->format.height,
			source_fmt.format.width, source_fmt.format.height);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(dcmipp_bytecap_pix_map_list); i++) {
		if (dcmipp_bytecap_pix_map_list[i].pixelformat ==
		    vcap->format.pixelformat &&
		    dcmipp_bytecap_pix_map_list[i].code ==
		    source_fmt.format.code)
			break;
	}

	if (i == ARRAY_SIZE(dcmipp_bytecap_pix_map_list)) {
		dev_err(vcap->dev, "mbus code 0x%x does not match the capture device format (0x%x)\n",
			vcap->format.pixelformat, source_fmt.format.code);
		return -EINVAL;
	}

	return 0;
}

static const struct media_entity_operations dcmipp_bytecap_entity_ops = {
	.link_validate = dcmipp_bytecap_link_validate,
};
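
/*
 * Create the byte capture video device, its vb2 queue and media entity, and
 * register it with the V4L2 / media controller frameworks.
 */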
struct dcmipp_ent_device *dcmipp_bytecap_ent_init(struct device *dev,
						  const char *entity_name,
						  struct v4l2_device *v4l2_dev,
						  void __iomem *regs)
{
	struct dcmipp_bytecap_device *vcap;
	struct video_device *vdev;
	struct vb2_queue *q;
	const unsigned long pad_flag = MEDIA_PAD_FL_SINK;
	int ret = 0;

	/* Allocate the dcmipp_bytecap_device struct */
	vcap = kzalloc(sizeof(*vcap), GFP_KERNEL);
	if (!vcap)
		return ERR_PTR(-ENOMEM);

	/* Allocate the pads */
	vcap->ved.pads = dcmipp_pads_init(1, &pad_flag);
	if (IS_ERR(vcap->ved.pads)) {
		ret = PTR_ERR(vcap->ved.pads);
		goto err_free_vcap;
	}

	/* Initialize the media entity */
	vcap->vdev.entity.name = entity_name;
	vcap->vdev.entity.function = MEDIA_ENT_F_IO_V4L;
	vcap->vdev.entity.ops = &dcmipp_bytecap_entity_ops;
	ret = media_entity_pads_init(&vcap->vdev.entity, 1, vcap->ved.pads);
	if (ret)
		goto err_clean_pads;

	/* Initialize the lock */
	mutex_init(&vcap->lock);

	/* Initialize the vb2 queue */
	q = &vcap->queue;
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->lock = &vcap->lock;
	q->drv_priv = vcap;
	q->buf_struct_size = sizeof(struct dcmipp_buf);
	q->ops = &dcmipp_bytecap_qops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->min_queued_buffers = 1;
	q->dev = dev;

	/* DCMIPP requires 16 bytes aligned buffers */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "Failed to set DMA mask\n");
		goto err_mutex_destroy;
	}

	ret = vb2_queue_init(q);
	if (ret) {
		dev_err(dev, "%s: vb2 queue init failed (err=%d)\n",
			entity_name, ret);
		goto err_clean_m_ent;
	}

	/* Initialize buffer list and its lock */
	INIT_LIST_HEAD(&vcap->buffers);
	spin_lock_init(&vcap->irqlock);

	/* Set default frame format */
	vcap->format = fmt_default;

	/* Fill the dcmipp_ent_device struct */
	vcap->ved.ent = &vcap->vdev.entity;
	vcap->ved.handler = dcmipp_bytecap_irq_callback;
	vcap->ved.thread_fn = dcmipp_bytecap_irq_thread;
	vcap->dev = dev;
	vcap->regs = regs;

	/* Initialize the video_device struct */
	vdev = &vcap->vdev;
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
			    V4L2_CAP_IO_MC;
	vdev->release = dcmipp_bytecap_release;
	vdev->fops = &dcmipp_bytecap_fops;
	vdev->ioctl_ops = &dcmipp_bytecap_ioctl_ops;
	vdev->lock = &vcap->lock;
	vdev->queue = q;
	vdev->v4l2_dev = v4l2_dev;
	strscpy(vdev->name, entity_name, sizeof(vdev->name));
	video_set_drvdata(vdev, &vcap->ved);

	/* Register the video_device with the v4l2 and the media framework */
	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (ret) {
		dev_err(dev, "%s: video register failed (err=%d)\n",
			vcap->vdev.name, ret);
		goto err_clean_m_ent;
	}

	return &vcap->ved;

err_clean_m_ent:
	media_entity_cleanup(&vcap->vdev.entity);
err_mutex_destroy:
	mutex_destroy(&vcap->lock);
err_clean_pads:
	dcmipp_pads_cleanup(vcap->ved.pads);
err_free_vcap:
	kfree(vcap);

	return ERR_PTR(ret);
}