1 // SPDX-License-Identifier: GPL-2.0-only
3 * RP1 Camera Front End Driver
5 * Copyright (c) 2021-2024 Raspberry Pi Ltd.
6 * Copyright (c) 2023-2024 Ideas on Board Oy
10 #include <linux/debugfs.h>
11 #include <linux/delay.h>
12 #include <linux/device.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/err.h>
15 #include <linux/fwnode.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
19 #include <linux/lcm.h>
20 #include <linux/math.h>
21 #include <linux/module.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/property.h>
25 #include <linux/seq_file.h>
26 #include <linux/slab.h>
27 #include <linux/uaccess.h>
28 #include <linux/videodev2.h>
30 #include <media/v4l2-async.h>
31 #include <media/v4l2-common.h>
32 #include <media/v4l2-ctrls.h>
33 #include <media/v4l2-dev.h>
34 #include <media/v4l2-device.h>
35 #include <media/v4l2-event.h>
36 #include <media/v4l2-fwnode.h>
37 #include <media/v4l2-ioctl.h>
38 #include <media/v4l2-mc.h>
39 #include <media/videobuf2-dma-contig.h>
41 #include <linux/media/raspberrypi/pisp_fe_config.h>
42 #include <linux/media/raspberrypi/pisp_fe_statistics.h>
49 #define CREATE_TRACE_POINTS
50 #include "cfe-trace.h"
52 #define CFE_MODULE_NAME "rp1-cfe"
53 #define CFE_VERSION "1.0"
55 #define cfe_dbg(cfe, fmt, arg...) dev_dbg(&(cfe)->pdev->dev, fmt, ##arg)
56 #define cfe_info(cfe, fmt, arg...) dev_info(&(cfe)->pdev->dev, fmt, ##arg)
57 #define cfe_err(cfe, fmt, arg...) dev_err(&(cfe)->pdev->dev, fmt, ##arg)
59 /* MIPICFG registers */
60 #define MIPICFG_CFG 0x004
61 #define MIPICFG_INTR 0x028
62 #define MIPICFG_INTE 0x02c
63 #define MIPICFG_INTF 0x030
64 #define MIPICFG_INTS 0x034
66 #define MIPICFG_CFG_SEL_CSI BIT(0)
68 #define MIPICFG_INT_CSI_DMA BIT(0)
69 #define MIPICFG_INT_CSI_HOST BIT(2)
70 #define MIPICFG_INT_PISP_FE BIT(4)
72 #define BPL_ALIGNMENT 16
73 #define MAX_BYTESPERLINE 0xffffff00
74 #define MAX_BUFFER_SIZE 0xffffff00
76 * Max width is therefore determined by the max stride divided by the number of
79 * However, to avoid overflow issues let's use a 16k maximum. This lets us
80 * calculate 16k * 16k * 4 with 32bits. If we need higher maximums, a careful
81 * review and adjustment of the code is needed so that it will deal with
82 * overflows correctly.
84 #define MAX_WIDTH 16384
85 #define MAX_HEIGHT MAX_WIDTH
86 /* Define a nominal minimum image size */
90 #define MIN_META_WIDTH 4
91 #define MIN_META_HEIGHT 1
/*
 * Default media bus format applied to pads at initialisation: 10-bit Bayer
 * (SRGGB10), progressive, RAW colorspace with full-range quantization.
 * NOTE(review): width/height initialisers appear elided in this extraction.
 */
93 const struct v4l2_mbus_framefmt cfe_default_format = {
96 .code = MEDIA_BUS_FMT_SRGGB10_1X10,
97 .field = V4L2_FIELD_NONE,
98 .colorspace = V4L2_COLORSPACE_RAW,
99 .ycbcr_enc = V4L2_YCBCR_ENC_601,
100 .quantization = V4L2_QUANTIZATION_FULL_RANGE,
101 .xfer_func = V4L2_XFER_FUNC_NONE,
105 /* CSI2 HW output nodes first. */
110 /* FE only nodes from here on. */
/*
 * Static description of one video device node: its media pad flags and the
 * subdev pad it links to. (Other members, e.g. name/caps used by node_desc[]
 * below, are not visible in this extraction.)
 */
118 struct node_description {
122 unsigned int pad_flags;
123 unsigned int link_pad;
126 /* Must match the ordering of enum ids */
/*
 * Per-node capability/pad table, indexed by node id. The first four entries
 * are the CSI2 DMA output channels (linked to CSI2_PAD_FIRST_SOURCE + n);
 * the remaining entries are the PiSP front-end output, stats and config
 * nodes. Must match the ordering of the node id enum.
 */
127 static const struct node_description node_desc[NUM_NODES] = {
130 .caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE,
131 .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
132 .link_pad = CSI2_PAD_FIRST_SOURCE + 0
135 * At the moment the main userspace component (libcamera) doesn't
136 * support metadata with video nodes that support both video and
137 * metadata. So for the time being this node is set to only support
138 * V4L2_CAP_META_CAPTURE.
142 .caps = V4L2_CAP_META_CAPTURE,
143 .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
144 .link_pad = CSI2_PAD_FIRST_SOURCE + 1
148 .caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE,
149 .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
150 .link_pad = CSI2_PAD_FIRST_SOURCE + 2
154 .caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE,
155 .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
156 .link_pad = CSI2_PAD_FIRST_SOURCE + 3
/* PiSP front-end image outputs 0/1. */
160 .caps = V4L2_CAP_VIDEO_CAPTURE,
161 .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
162 .link_pad = FE_OUTPUT0_PAD
166 .caps = V4L2_CAP_VIDEO_CAPTURE,
167 .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
168 .link_pad = FE_OUTPUT1_PAD
/* FE statistics capture node. */
172 .caps = V4L2_CAP_META_CAPTURE,
173 .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
174 .link_pad = FE_STATS_PAD
/* FE config node: the only OUTPUT (userspace -> driver) node. */
178 .caps = V4L2_CAP_META_OUTPUT,
179 .pad_flags = MEDIA_PAD_FL_SOURCE | MEDIA_PAD_FL_MUST_CONNECT,
180 .link_pad = FE_CONFIG_PAD
184 #define is_fe_node(node) (((node)->id) >= FE_OUT0)
185 #define is_csi2_node(node) (!is_fe_node(node))
187 #define node_supports_image_output(node) \
188 (node_desc[(node)->id].caps & V4L2_CAP_VIDEO_CAPTURE)
189 #define node_supports_meta_output(node) \
190 (node_desc[(node)->id].caps & V4L2_CAP_META_CAPTURE)
191 #define node_supports_image_input(node) \
192 (node_desc[(node)->id].caps & V4L2_CAP_VIDEO_OUTPUT)
193 #define node_supports_meta_input(node) \
194 (node_desc[(node)->id].caps & V4L2_CAP_META_OUTPUT)
195 #define node_supports_image(node) \
196 (node_supports_image_output(node) || node_supports_image_input(node))
197 #define node_supports_meta(node) \
198 (node_supports_meta_output(node) || node_supports_meta_input(node))
200 #define is_image_output_node(node) \
201 ((node)->buffer_queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
202 #define is_image_input_node(node) \
203 ((node)->buffer_queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
204 #define is_image_node(node) \
205 (is_image_output_node(node) || is_image_input_node(node))
206 #define is_meta_output_node(node) \
207 ((node)->buffer_queue.type == V4L2_BUF_TYPE_META_CAPTURE)
208 #define is_meta_input_node(node) \
209 ((node)->buffer_queue.type == V4L2_BUF_TYPE_META_OUTPUT)
210 #define is_meta_node(node) \
211 (is_meta_output_node(node) || is_meta_input_node(node))
213 /* To track state across all nodes. */
214 #define NODE_REGISTERED BIT(0)
215 #define NODE_ENABLED BIT(1)
216 #define NODE_STREAMING BIT(2)
217 #define FS_INT BIT(3)
218 #define FE_INT BIT(4)
222 struct vb2_v4l2_buffer vb;
223 struct list_head list;
/*
 * FE_CONFIG node buffer: embeds a cfe_buffer plus a kernel-side copy of the
 * userspace-supplied PiSP front-end configuration (filled in buf_prepare).
 */
226 struct cfe_config_buffer {
227 struct cfe_buffer buf;
228 struct pisp_fe_config config;
/* Map a vb2_buffer back to its enclosing cfe_buffer. */
231 static inline struct cfe_buffer *to_cfe_buffer(struct vb2_buffer *vb)
233 return container_of(vb, struct cfe_buffer, vb.vb2_buf);
/* Map a cfe_buffer back to its enclosing cfe_config_buffer (FE_CONFIG only). */
237 struct cfe_config_buffer *to_cfe_config_buffer(struct cfe_buffer *buf)
239 return container_of(buf, struct cfe_config_buffer, buf);
245 /* Pointer pointing to current v4l2_buffer */
246 struct cfe_buffer *cur_frm;
247 /* Pointer pointing to next v4l2_buffer */
248 struct cfe_buffer *next_frm;
249 /* Used to store current pixel format */
250 struct v4l2_format vid_fmt;
251 /* Used to store current meta format */
252 struct v4l2_format meta_fmt;
253 /* Buffer queue used in video-buf */
254 struct vb2_queue buffer_queue;
255 /* Queue of filled frames */
256 struct list_head dma_queue;
257 /* lock used to access this structure */
259 /* Identifies video device for this channel */
260 struct video_device video_dev;
261 /* Pointer to the parent handle */
262 struct cfe_device *cfe;
263 /* Media pad for this node */
264 struct media_pad pad;
265 /* Frame-start counter */
266 unsigned int fs_count;
267 /* Timestamp of the current buffer */
272 struct dentry *debugfs;
275 /* peripheral base address */
276 void __iomem *mipi_cfg_base;
281 struct v4l2_device v4l2_dev;
282 struct media_device mdev;
283 struct media_pipeline pipe;
285 /* IRQ lock for node state and DMA queues */
286 spinlock_t state_lock;
291 struct platform_device *pdev;
292 /* subdevice async Notifier */
293 struct v4l2_async_notifier notifier;
295 /* Source sub device */
296 struct v4l2_subdev *source_sd;
297 /* Source subdev's pad */
300 struct cfe_node node[NUM_NODES];
301 DECLARE_BITMAP(node_flags, NUM_STATES * NUM_NODES);
303 struct csi2_device csi2;
304 struct pisp_fe_device fe;
308 /* Mask of enabled streams */
/* True when the PiSP front end is in use, i.e. a CSI2 channel is routed to it. */
312 static inline bool is_fe_enabled(struct cfe_device *cfe)
314 return cfe->fe_csi2_channel != -1;
/* Map the embedded v4l2_device back to its owning cfe_device. */
317 static inline struct cfe_device *to_cfe_device(struct v4l2_device *v4l2_dev)
319 return container_of(v4l2_dev, struct cfe_device, v4l2_dev);
/* Read a 32-bit MIPICFG register at the given byte offset. */
322 static inline u32 cfg_reg_read(struct cfe_device *cfe, u32 offset)
324 return readl(cfe->mipi_cfg_base + offset);
/* Write a 32-bit MIPICFG register at the given byte offset. */
327 static inline void cfg_reg_write(struct cfe_device *cfe, u32 offset, u32 val)
329 writel(val, cfe->mipi_cfg_base + offset);
/*
 * Return true iff *all* bits set in @state are set for node @node_id in
 * cfe->node_flags. Caller must hold cfe->state_lock where racing matters.
 * The scan limit of sizeof(state) bits is sufficient while NUM_STATES <= 8.
 */
332 static bool check_state(struct cfe_device *cfe, unsigned long state,
333 unsigned int node_id)
337 for_each_set_bit(bit, &state, sizeof(state)) {
338 if (!test_bit(bit + (node_id * NUM_STATES), cfe->node_flags))
/* Set the given state bits for node @node_id in cfe->node_flags. */
345 static void set_state(struct cfe_device *cfe, unsigned long state,
346 unsigned int node_id)
350 for_each_set_bit(bit, &state, sizeof(state))
351 set_bit(bit + (node_id * NUM_STATES), cfe->node_flags);
/* Clear the given state bits for node @node_id in cfe->node_flags. */
354 static void clear_state(struct cfe_device *cfe, unsigned long state,
355 unsigned int node_id)
359 for_each_set_bit(bit, &state, sizeof(state))
360 clear_bit(bit + (node_id * NUM_STATES), cfe->node_flags);
/* True if at least one node satisfies all the state bits in @cond. */
363 static bool test_any_node(struct cfe_device *cfe, unsigned long cond)
365 for (unsigned int i = 0; i < NUM_NODES; i++) {
366 if (check_state(cfe, cond, i))
/*
 * True if every node that satisfies @precond also satisfies @cond.
 * Nodes not matching @precond are ignored, so e.g.
 * test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING) asks "are all enabled
 * nodes streaming?".
 */
373 static bool test_all_nodes(struct cfe_device *cfe, unsigned long precond,
376 for (unsigned int i = 0; i < NUM_NODES; i++) {
377 if (check_state(cfe, precond, i)) {
378 if (!check_state(cfe, cond, i))
/*
 * debugfs dump of the MIPICFG register block. Resumes the device before
 * touching registers and drops the PM reference afterwards. The DUMP()
 * invocations themselves are not visible in this extraction.
 */
386 static int mipi_cfg_regs_show(struct seq_file *s, void *data)
388 struct cfe_device *cfe = s->private;
391 ret = pm_runtime_resume_and_get(&cfe->pdev->dev);
395 #define DUMP(reg) seq_printf(s, #reg " \t0x%08x\n", cfg_reg_read(cfe, reg))
403 pm_runtime_put(&cfe->pdev->dev);
408 DEFINE_SHOW_ATTRIBUTE(mipi_cfg_regs);
410 /* Format setup functions */
/* Look up a CFE format entry by media bus code; NULL-return path elided. */
411 const struct cfe_fmt *find_format_by_code(u32 code)
413 for (unsigned int i = 0; i < ARRAY_SIZE(formats); i++) {
414 if (formats[i].code == code)
/* Look up a CFE format entry by V4L2 fourcc; NULL-return path elided. */
421 const struct cfe_fmt *find_format_by_pix(u32 pixelformat)
423 for (unsigned int i = 0; i < ARRAY_SIZE(formats); i++) {
424 if (formats[i].fourcc == pixelformat)
/* Look up a CFE format matching both the mbus code and the fourcc. */
431 static const struct cfe_fmt *find_format_by_code_and_fourcc(u32 code,
434 for (unsigned int i = 0; i < ARRAY_SIZE(formats); i++) {
435 if (formats[i].code == code && formats[i].fourcc == fourcc)
443 * Given the mbus code, find the 16 bit remapped code. Returns 0 if no remap
446 u32 cfe_find_16bit_code(u32 code)
448 const struct cfe_fmt *cfe_fmt;
450 cfe_fmt = find_format_by_code(code);
/* No format entry, or no 16-bit remap registered for it. */
452 if (!cfe_fmt || !cfe_fmt->remap[CFE_REMAP_16BIT])
455 cfe_fmt = find_format_by_pix(cfe_fmt->remap[CFE_REMAP_16BIT]);
459 return cfe_fmt->code;
463 * Given the mbus code, find the 8 bit compressed code. Returns 0 if no remap
466 u32 cfe_find_compressed_code(u32 code)
468 const struct cfe_fmt *cfe_fmt;
470 cfe_fmt = find_format_by_code(code);
/* No format entry, or no compressed remap registered for it. */
472 if (!cfe_fmt || !cfe_fmt->remap[CFE_REMAP_COMPRESSED])
475 cfe_fmt = find_format_by_pix(cfe_fmt->remap[CFE_REMAP_COMPRESSED]);
479 return cfe_fmt->code;
/*
 * Clamp/align a video capture format and derive bytesperline and sizeimage.
 * Width/height are bounded to [MIN,MAX] with width aligned to a multiple of
 * 4 (step 2). A caller-supplied bytesperline larger than the minimum is
 * honoured (aligned up to BPL_ALIGNMENT) as extra padding; otherwise the
 * minimum stride is used.
 */
482 static void cfe_calc_vid_format_size_bpl(struct cfe_device *cfe,
483 const struct cfe_fmt *fmt,
484 struct v4l2_format *f)
486 unsigned int min_bytesperline;
488 v4l_bound_align_image(&f->fmt.pix.width, MIN_WIDTH, MAX_WIDTH, 2,
489 &f->fmt.pix.height, MIN_HEIGHT, MAX_HEIGHT, 0, 0);
/* Minimum stride: width * bits-per-pixel / 8, aligned to BPL_ALIGNMENT. */
492 ALIGN((f->fmt.pix.width * fmt->depth) >> 3, BPL_ALIGNMENT);
494 if (f->fmt.pix.bytesperline > min_bytesperline &&
495 f->fmt.pix.bytesperline <= MAX_BYTESPERLINE)
496 f->fmt.pix.bytesperline =
497 ALIGN(f->fmt.pix.bytesperline, BPL_ALIGNMENT);
499 f->fmt.pix.bytesperline = min_bytesperline;
501 f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
503 cfe_dbg(cfe, "%s: %p4cc size: %ux%u bpl:%u img_size:%u\n", __func__,
504 &f->fmt.pix.pixelformat, f->fmt.pix.width, f->fmt.pix.height,
505 f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
508 static void cfe_calc_meta_format_size_bpl(struct cfe_device *cfe,
509 const struct cfe_fmt *fmt,
510 struct v4l2_format *f)
512 v4l_bound_align_image(&f->fmt.meta.width, MIN_META_WIDTH, MAX_WIDTH, 2,
513 &f->fmt.meta.height, MIN_META_HEIGHT, MAX_HEIGHT,
516 f->fmt.meta.bytesperline = (f->fmt.meta.width * fmt->depth) >> 3;
517 f->fmt.meta.buffersize = f->fmt.meta.height * f->fmt.pix.bytesperline;
519 cfe_dbg(cfe, "%s: %p4cc size: %ux%u bpl:%u buf_size:%u\n", __func__,
520 &f->fmt.meta.dataformat, f->fmt.meta.width, f->fmt.meta.height,
521 f->fmt.meta.bytesperline, f->fmt.meta.buffersize);
/*
 * Program the next queued buffer into each streaming CSI2 DMA channel.
 * Called with cfe->state_lock held (runs from job scheduling paths).
 * Pops the head of each node's dma_queue and hands its DMA address to the
 * CSI2 block. For metadata nodes the stride is 0 (byte-packed mode).
 */
524 static void cfe_schedule_next_csi2_job(struct cfe_device *cfe)
526 struct cfe_buffer *buf;
529 for (unsigned int i = 0; i < CSI2_NUM_CHANNELS; i++) {
530 struct cfe_node *node = &cfe->node[i];
531 unsigned int stride, size;
533 if (!check_state(cfe, NODE_STREAMING, i))
536 buf = list_first_entry(&node->dma_queue, struct cfe_buffer,
538 node->next_frm = buf;
539 list_del(&buf->list);
541 trace_cfe_csi2_schedule(node->id, &buf->vb.vb2_buf);
543 if (is_meta_node(node)) {
544 size = node->meta_fmt.fmt.meta.buffersize;
545 /* We use CSI2_CH_CTRL_PACK_BYTES, so stride == 0 */
548 size = node->vid_fmt.fmt.pix.sizeimage;
549 stride = node->vid_fmt.fmt.pix.bytesperline;
552 addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
553 csi2_set_buffer(&cfe->csi2, node->id, addr, stride, size);
/*
 * Gather the next buffer from every streaming FE node, indexed by FE pad,
 * and submit them as one job to the PiSP front end together with the
 * validated config from the FE_CONFIG buffer. Caller holds state_lock.
 */
557 static void cfe_schedule_next_pisp_job(struct cfe_device *cfe)
559 struct vb2_buffer *vb2_bufs[FE_NUM_PADS] = { 0 };
560 struct cfe_config_buffer *config_buf;
561 struct cfe_buffer *buf;
563 for (unsigned int i = CSI2_NUM_CHANNELS; i < NUM_NODES; i++) {
564 struct cfe_node *node = &cfe->node[i];
566 if (!check_state(cfe, NODE_STREAMING, i))
569 buf = list_first_entry(&node->dma_queue, struct cfe_buffer,
572 trace_cfe_fe_schedule(node->id, &buf->vb.vb2_buf);
574 node->next_frm = buf;
575 vb2_bufs[node_desc[i].link_pad] = &buf->vb.vb2_buf;
576 list_del(&buf->list);
/* The config node's next_frm was just set in the loop above. */
579 config_buf = to_cfe_config_buffer(cfe->node[FE_CONFIG].next_frm);
580 pisp_fe_submit_job(&cfe->fe, vb2_bufs, &config_buf->config);
/*
 * A job is ready when every enabled node has at least one buffer queued.
 * Disabled nodes are skipped. Caller holds state_lock.
 */
583 static bool cfe_check_job_ready(struct cfe_device *cfe)
585 for (unsigned int i = 0; i < NUM_NODES; i++) {
586 struct cfe_node *node = &cfe->node[i];
588 if (!check_state(cfe, NODE_ENABLED, i))
591 if (list_empty(&node->dma_queue))
/*
 * Queue the next hardware job: program the CSI2 channels and, when the
 * front end is in use, submit the FE job too. Updates job_queued/job_ready
 * bookkeeping. Caller holds state_lock.
 */
598 static void cfe_prepare_next_job(struct cfe_device *cfe)
600 trace_cfe_prepare_next_job(is_fe_enabled(cfe));
602 cfe->job_queued = true;
603 cfe_schedule_next_csi2_job(cfe);
604 if (is_fe_enabled(cfe))
605 cfe_schedule_next_pisp_job(cfe);
607 /* Flag if another job is ready after this. */
608 cfe->job_ready = cfe_check_job_ready(cfe);
/*
 * Complete the node's current frame: stamp its sequence number (fs_count
 * was already incremented at frame start, hence the -1) and return it to
 * vb2 with the given state.
 */
611 static void cfe_process_buffer_complete(struct cfe_node *node,
612 enum vb2_buffer_state state)
614 trace_cfe_buffer_complete(node->id, &node->cur_frm->vb);
616 node->cur_frm->vb.sequence = node->fs_count - 1;
617 vb2_buffer_done(&node->cur_frm->vb.vb2_buf, state);
/* Deliver a V4L2 FRAME_SYNC event for the frame that just started. */
620 static void cfe_queue_event_sof(struct cfe_node *node)
622 struct v4l2_event event = {
623 .type = V4L2_EVENT_FRAME_SYNC,
624 .u.frame_sync.frame_sequence = node->fs_count - 1,
627 v4l2_event_queue(&node->video_dev, &event);
/*
 * Frame-start handler for one node (called from cfe_isr with state_lock
 * held). Promotes next_frm to cur_frm, synchronises the frame timestamp
 * across nodes, and marks the node as having seen FS. When all streaming
 * nodes have matching fs_counts, job_queued is cleared so a new job can be
 * prepared. (Some statements are elided in this extraction.)
 */
630 static void cfe_sof_isr(struct cfe_node *node)
632 struct cfe_device *cfe = node->cfe;
633 bool matching_fs = true;
635 trace_cfe_frame_start(node->id, node->fs_count);
638 * If the sensor is producing unexpected frame event ordering over a
639 * sustained period of time, guard against the possibility of coming
640 * here and orphaning the cur_frm if it's not been dequeued already.
641 * Unfortunately, there is not enough hardware state to tell if this
644 if (WARN(node->cur_frm, "%s: [%s] Orphaned frame at seq %u\n",
645 __func__, node_desc[node->id].name, node->fs_count))
646 cfe_process_buffer_complete(node, VB2_BUF_STATE_ERROR)
648 node->cur_frm = node->next_frm;
649 node->next_frm = NULL;
652 node->ts = ktime_get_ns();
653 for (unsigned int i = 0; i < NUM_NODES; i++) {
654 if (!check_state(cfe, NODE_STREAMING, i) || i == node->id)
657 * This checks if any other node has seen a FS. If yes, use the
658 * same timestamp, eventually across all node buffers.
660 if (cfe->node[i].fs_count >= node->fs_count)
661 node->ts = cfe->node[i].ts;
663 * This checks if all other node have seen a matching FS. If
664 * yes, we can flag another job to be queued.
666 if (matching_fs && cfe->node[i].fs_count != node->fs_count)
671 cfe->job_queued = false;
674 node->cur_frm->vb.vb2_buf.timestamp = node->ts;
676 set_state(cfe, FS_INT, node->id);
677 clear_state(cfe, FE_INT, node->id);
679 if (is_image_output_node(node))
680 cfe_queue_event_sof(node);
/*
 * Frame-end handler for one node (called from cfe_isr with state_lock
 * held): complete the current buffer as DONE and flip the FS/FE state bits.
 */
683 static void cfe_eof_isr(struct cfe_node *node)
685 struct cfe_device *cfe = node->cfe;
687 trace_cfe_frame_end(node->id, node->fs_count - 1);
690 cfe_process_buffer_complete(node, VB2_BUF_STATE_DONE);
692 node->cur_frm = NULL;
693 set_state(cfe, FE_INT, node->id);
694 clear_state(cfe, FS_INT, node->id);
/*
 * Top-level interrupt handler. Reads the combined MIPICFG status, lets the
 * CSI2 and FE sub-blocks fill per-node start-of-frame/end-of-frame arrays,
 * then dispatches cfe_sof_isr()/cfe_eof_isr() per node while carefully
 * ordering FS vs FE handling for the three coincidence cases described in
 * the comments below. Finally kicks the next job if one is ready.
 * (Some handler-call statements are elided in this extraction.)
 */
697 static irqreturn_t cfe_isr(int irq, void *dev)
699 struct cfe_device *cfe = dev;
700 bool sof[NUM_NODES] = { 0 }, eof[NUM_NODES] = { 0 };
703 sts = cfg_reg_read(cfe, MIPICFG_INTS);
705 if (sts & MIPICFG_INT_CSI_DMA)
706 csi2_isr(&cfe->csi2, sof, eof);
/* FE nodes occupy the tail of the sof/eof arrays, after the CSI2 channels. */
708 if (sts & MIPICFG_INT_PISP_FE)
709 pisp_fe_isr(&cfe->fe, sof + CSI2_NUM_CHANNELS,
710 eof + CSI2_NUM_CHANNELS);
712 spin_lock(&cfe->state_lock);
714 for (unsigned int i = 0; i < NUM_NODES; i++) {
715 struct cfe_node *node = &cfe->node[i];
718 * The check_state(NODE_STREAMING) is to ensure we do not loop
719 * over the CSI2_CHx nodes when the FE is active since they
720 * generate interrupts even though the node is not streaming.
722 if (!check_state(cfe, NODE_STREAMING, i) || !(sof[i] || eof[i]))
726 * There are 3 cases where we could get FS + FE_ACK at
728 * 1) FE of the current frame, and FS of the next frame.
729 * 2) FS + FE of the same frame.
730 * 3) FE of the current frame, and FS + FE of the next
731 * frame. To handle this, see the sof handler below.
733 * (1) is handled implicitly by the ordering of the FE and FS
738 * The condition below tests for (2). Run the FS handler
739 * first before the FE handler, both for the current
742 if (sof[i] && !check_state(cfe, FS_INT, i)) {
752 * The condition below tests for (3). In such cases, we
753 * come in here with FS flag set in the node state from
754 * the previous frame since it only gets cleared in
755 * cfe_eof_isr(). Handle the FE for the previous
756 * frame first before the FS handler for the current
759 if (check_state(cfe, FS_INT, node->id) &&
760 !check_state(cfe, FE_INT, node->id)) {
761 cfe_dbg(cfe, "%s: [%s] Handling missing previous FE interrupt\n",
762 __func__, node_desc[node->id].name);
769 if (!cfe->job_queued && cfe->job_ready)
770 cfe_prepare_next_job(cfe);
773 spin_unlock(&cfe->state_lock);
/*
 * Fallback virtual-channel/datatype lookup for sources that do not
 * implement get_frame_desc: derive the datatype from the format configured
 * on the CSI2 sink pad. The *vc assignment is elided in this extraction —
 * presumably it defaults to VC 0; verify against the full source.
 */
782 static int cfe_get_vc_dt_fallback(struct cfe_device *cfe, u8 *vc, u8 *dt)
784 struct v4l2_subdev_state *state;
785 struct v4l2_mbus_framefmt *fmt;
786 const struct cfe_fmt *cfe_fmt;
788 state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd);
790 fmt = v4l2_subdev_state_get_format(state, CSI2_PAD_SINK, 0);
794 cfe_fmt = find_format_by_code(fmt->code);
799 *dt = cfe_fmt->csi_dt;
/*
 * Resolve the CSI-2 virtual channel and datatype for a CSI2 output channel:
 * find the stream routed to that channel, query the source subdev's frame
 * descriptor, and pick the entry matching the routed stream. Falls back to
 * cfe_get_vc_dt_fallback() when get_frame_desc is not implemented.
 */
804 static int cfe_get_vc_dt(struct cfe_device *cfe, unsigned int channel, u8 *vc,
807 struct v4l2_mbus_frame_desc remote_desc;
808 struct v4l2_subdev_state *state;
813 state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd);
815 ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
816 CSI2_PAD_FIRST_SOURCE + channel, 0, NULL, &sink_stream);
820 ret = v4l2_subdev_call(cfe->source_sd, pad, get_frame_desc,
821 cfe->source_pad, &remote_desc);
822 if (ret == -ENOIOCTLCMD) {
823 cfe_dbg(cfe, "source does not support get_frame_desc, use fallback\n");
824 return cfe_get_vc_dt_fallback(cfe, vc, dt);
826 cfe_err(cfe, "Failed to get frame descriptor\n");
830 if (remote_desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
831 cfe_err(cfe, "Frame descriptor does not describe CSI-2 link");
/* Locate the descriptor entry for the stream feeding this channel. */
835 for (i = 0; i < remote_desc.num_entries; i++) {
836 if (remote_desc.entry[i].stream == sink_stream)
840 if (i == remote_desc.num_entries) {
841 cfe_err(cfe, "Stream %u not found in remote frame desc\n",
846 *vc = remote_desc.entry[i].bus.csi2.vc;
847 *dt = remote_desc.entry[i].bus.csi2.dt;
/*
 * Start the hardware path for one node. When all enabled nodes are now
 * streaming and the front end is in use, the FE's feeding CSI2 channel is
 * started in FE_STREAMING mode and the FE itself is kicked. For plain CSI2
 * nodes the channel is started in NORMAL/REMAP/COMPRESSED mode depending on
 * how the chosen pixelformat relates to the source mbus code. Finally, if a
 * complete job is already queued up, it is scheduled immediately.
 * Called with the CSI2 subdev active state locked.
 * (Several statements, including error paths, are elided in this
 * extraction.)
 */
852 static int cfe_start_channel(struct cfe_node *node)
854 struct cfe_device *cfe = node->cfe;
855 struct v4l2_subdev_state *state;
856 struct v4l2_mbus_framefmt *source_fmt;
857 const struct cfe_fmt *fmt;
862 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
864 start_fe = is_fe_enabled(cfe) &&
865 test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING);
867 state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd);
870 unsigned int width, height;
873 cfe_dbg(cfe, "%s: %s using csi2 channel %d\n", __func__,
874 node_desc[FE_OUT0].name, cfe->fe_csi2_channel);
876 ret = cfe_get_vc_dt(cfe, cfe->fe_csi2_channel, &vc, &dt);
880 source_fmt = v4l2_subdev_state_get_format(state,
881 node_desc[cfe->fe_csi2_channel].link_pad);
882 fmt = find_format_by_code(source_fmt->code);
884 width = source_fmt->width;
885 height = source_fmt->height;
887 /* Must have a valid CSI2 datatype. */
888 WARN_ON(!fmt->csi_dt);
891 * Start the associated CSI2 Channel as well.
893 * Must write to the ADDR register to latch the ctrl values
894 * even if we are connected to the front end. Once running,
895 * this is handled by the CSI2 AUTO_ARM mode.
897 csi2_start_channel(&cfe->csi2, cfe->fe_csi2_channel,
898 CSI2_MODE_FE_STREAMING,
899 true, false, width, height, vc, dt);
900 csi2_set_buffer(&cfe->csi2, cfe->fe_csi2_channel, 0, 0, -1);
901 pisp_fe_start(&cfe->fe);
904 if (is_csi2_node(node)) {
905 unsigned int width = 0, height = 0;
908 ret = cfe_get_vc_dt(cfe, node->id, &vc, &dt);
/* Error path: undo the FE start performed above. */
911 csi2_stop_channel(&cfe->csi2,
912 cfe->fe_csi2_channel);
913 pisp_fe_stop(&cfe->fe);
919 u32 mode = CSI2_MODE_NORMAL;
921 source_fmt = v4l2_subdev_state_get_format(state,
922 node_desc[node->id].link_pad);
923 fmt = find_format_by_code(source_fmt->code);
925 /* Must have a valid CSI2 datatype. */
926 WARN_ON(!fmt->csi_dt);
928 if (is_image_output_node(node)) {
931 width = source_fmt->width;
932 height = source_fmt->height;
934 pixfmt = node->vid_fmt.fmt.pix.pixelformat;
936 if (pixfmt == fmt->remap[CFE_REMAP_16BIT]) {
937 mode = CSI2_MODE_REMAP;
938 } else if (pixfmt == fmt->remap[CFE_REMAP_COMPRESSED]) {
939 mode = CSI2_MODE_COMPRESSED;
940 csi2_set_compression(&cfe->csi2, node->id,
941 CSI2_COMPRESSION_DELTA, 0,
945 /* Unconditionally start this CSI2 channel. */
946 csi2_start_channel(&cfe->csi2, node->id,
951 is_meta_node(node) ? true : false,
952 width, height, vc, dt);
955 spin_lock_irqsave(&cfe->state_lock, flags);
956 if (cfe->job_ready && test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING))
957 cfe_prepare_next_job(cfe);
958 spin_unlock_irqrestore(&cfe->state_lock, flags);
/*
 * Stop the hardware path for one node. When @fe_stop is set (last FE user
 * going away), also stop the FE's feeding CSI2 channel and the FE itself.
 */
963 static void cfe_stop_channel(struct cfe_node *node, bool fe_stop)
965 struct cfe_device *cfe = node->cfe;
967 cfe_dbg(cfe, "%s: [%s] fe_stop %u\n", __func__,
968 node_desc[node->id].name, fe_stop);
971 csi2_stop_channel(&cfe->csi2, cfe->fe_csi2_channel);
972 pisp_fe_stop(&cfe->fe);
975 if (is_csi2_node(node))
976 csi2_stop_channel(&cfe->csi2, node->id);
/*
 * Return every buffer owned by this node to vb2 with the given state:
 * everything still on the dma_queue, plus cur_frm and next_frm (guarding
 * against the two pointing at the same buffer). The trace's third argument
 * tags the buffer origin (0 = cur, 1 = next, 2 = queued).
 */
979 static void cfe_return_buffers(struct cfe_node *node,
980 enum vb2_buffer_state state)
982 struct cfe_device *cfe = node->cfe;
983 struct cfe_buffer *buf, *tmp;
986 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
988 spin_lock_irqsave(&cfe->state_lock, flags);
989 list_for_each_entry_safe(buf, tmp, &node->dma_queue, list) {
990 list_del(&buf->list);
991 trace_cfe_return_buffer(node->id, buf->vb.vb2_buf.index, 2);
992 vb2_buffer_done(&buf->vb.vb2_buf, state);
996 trace_cfe_return_buffer(node->id,
997 node->cur_frm->vb.vb2_buf.index, 0);
998 vb2_buffer_done(&node->cur_frm->vb.vb2_buf, state);
1000 if (node->next_frm && node->cur_frm != node->next_frm) {
1001 trace_cfe_return_buffer(node->id,
1002 node->next_frm->vb.vb2_buf.index, 1);
1003 vb2_buffer_done(&node->next_frm->vb.vb2_buf, state);
1006 node->cur_frm = NULL;
1007 node->next_frm = NULL;
1008 spin_unlock_irqrestore(&cfe->state_lock, flags);
/*
 * vb2 .queue_setup: enforce a minimum of 3 buffers total on the queue and
 * validate (or provide) the single plane size from the node's current image
 * or metadata format.
 */
1015 static int cfe_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
1016 unsigned int *nplanes, unsigned int sizes[],
1017 struct device *alloc_devs[])
1019 struct cfe_node *node = vb2_get_drv_priv(vq);
1020 struct cfe_device *cfe = node->cfe;
1021 unsigned int size = is_image_node(node) ?
1022 node->vid_fmt.fmt.pix.sizeimage :
1023 node->meta_fmt.fmt.meta.buffersize;
1025 cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name,
1026 node->buffer_queue.type);
/* Ensure at least 3 buffers exist across current + newly requested. */
1028 if (vq->max_num_buffers + *nbuffers < 3)
1029 *nbuffers = 3 - vq->max_num_buffers;
1032 if (sizes[0] < size) {
1033 cfe_err(cfe, "sizes[0] %i < size %u\n", sizes[0], size);
/*
 * vb2 .buf_prepare: check the plane is large enough for the current format
 * and set the payload. For the FE_CONFIG node, additionally copy the
 * userspace PiSP config out of the buffer and validate it against the FE
 * output formats, so the kernel never re-reads user-writable memory later.
 */
1045 static int cfe_buffer_prepare(struct vb2_buffer *vb)
1047 struct cfe_node *node = vb2_get_drv_priv(vb->vb2_queue);
1048 struct cfe_device *cfe = node->cfe;
1049 struct cfe_buffer *buf = to_cfe_buffer(vb);
1052 trace_cfe_buffer_prepare(node->id, vb);
1054 size = is_image_node(node) ? node->vid_fmt.fmt.pix.sizeimage :
1055 node->meta_fmt.fmt.meta.buffersize;
1056 if (vb2_plane_size(vb, 0) < size) {
1057 cfe_err(cfe, "data will not fit into plane (%lu < %lu)\n",
1058 vb2_plane_size(vb, 0), size);
1062 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
1064 if (node->id == FE_CONFIG) {
1065 struct cfe_config_buffer *b = to_cfe_config_buffer(buf);
1066 void *addr = vb2_plane_vaddr(vb, 0);
1068 memcpy(&b->config, addr, sizeof(struct pisp_fe_config));
1069 return pisp_fe_validate_config(&cfe->fe, &b->config,
1070 &cfe->node[FE_OUT0].vid_fmt,
1071 &cfe->node[FE_OUT1].vid_fmt);
/*
 * vb2 .buf_queue: add the buffer to the node's DMA queue under state_lock,
 * re-evaluate job readiness, and kick a job immediately if none is queued
 * and all enabled nodes are streaming.
 */
1077 static void cfe_buffer_queue(struct vb2_buffer *vb)
1079 struct cfe_node *node = vb2_get_drv_priv(vb->vb2_queue);
1080 struct cfe_device *cfe = node->cfe;
1081 struct cfe_buffer *buf = to_cfe_buffer(vb);
1082 unsigned long flags;
1085 spin_lock_irqsave(&cfe->state_lock, flags);
1087 list_add_tail(&buf->list, &node->dma_queue);
1089 if (!cfe->job_ready)
1090 cfe->job_ready = cfe_check_job_ready(cfe);
1092 schedule_now = !cfe->job_queued && cfe->job_ready &&
1093 test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING);
1095 trace_cfe_buffer_queue(node->id, vb, schedule_now);
1098 cfe_prepare_next_job(cfe);
1100 spin_unlock_irqrestore(&cfe->state_lock, flags);
/*
 * Query the CSI-2 link frequency from the source subdev's controls.
 * For a single-route configuration the per-format bpp can be used for the
 * PIXEL_RATE fallback; with multiple streams bpp is left at 0 so the
 * fallback errors out instead of guessing (see comment below).
 * Returns a negative errno-style value on failure (checked by the caller).
 */
1103 static s64 cfe_get_source_link_freq(struct cfe_device *cfe)
1105 struct v4l2_subdev_state *state;
1109 state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd);
1112 * v4l2_get_link_freq() uses V4L2_CID_LINK_FREQ first, and falls back
1113 * to V4L2_CID_PIXEL_RATE if V4L2_CID_LINK_FREQ is not available.
1115 * With multistream input there is no single pixel rate, and thus we
1116 * cannot use V4L2_CID_PIXEL_RATE, so we pass 0 as the bpp which
1117 * causes v4l2_get_link_freq() to return an error if it falls back to
1118 * V4L2_CID_PIXEL_RATE.
1121 if (state->routing.num_routes == 1) {
1122 struct v4l2_subdev_route *route = &state->routing.routes[0];
1123 struct v4l2_mbus_framefmt *source_fmt;
1124 const struct cfe_fmt *fmt;
1126 source_fmt = v4l2_subdev_state_get_format(state,
1128 route->sink_stream);
1130 fmt = find_format_by_code(source_fmt->code);
1139 link_freq = v4l2_get_link_freq(cfe->source_sd->ctrl_handler, bpp,
1140 2 * cfe->csi2.dphy.active_lanes);
1142 cfe_err(cfe, "failed to get link freq for subdev '%s'\n",
1143 cfe->source_sd->name);
/*
 * vb2 .start_streaming. Verifies the node's media link (and FE_CONFIG when
 * the FE is used), resumes the device, starts the media pipeline and this
 * node's channel, then — only once ALL enabled nodes are streaming — brings
 * up the shared hardware: MIPICFG routing/interrupts, D-PHY lane count and
 * rate from the source, CSI-2 RX, and finally enables the source subdev
 * streams. Until the last node arrives, streaming stays "on hold".
 * Unwinds in reverse order on failure via the err_* labels (several of
 * which are elided in this extraction).
 */
1148 static int cfe_start_streaming(struct vb2_queue *vq, unsigned int count)
1150 struct v4l2_mbus_config mbus_config = { 0 };
1151 struct cfe_node *node = vb2_get_drv_priv(vq);
1152 struct cfe_device *cfe = node->cfe;
1153 struct v4l2_subdev_state *state;
1154 struct v4l2_subdev_route *route;
1158 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1160 if (!check_state(cfe, NODE_ENABLED, node->id)) {
1161 cfe_err(cfe, "%s node link is not enabled.\n",
1162 node_desc[node->id].name);
1167 ret = pm_runtime_resume_and_get(&cfe->pdev->dev);
1169 cfe_err(cfe, "pm_runtime_resume_and_get failed\n");
1173 /* When using the Frontend, we must enable the FE_CONFIG node. */
1174 if (is_fe_enabled(cfe) &&
1175 !check_state(cfe, NODE_ENABLED, cfe->node[FE_CONFIG].id)) {
1176 cfe_err(cfe, "FE enabled, but FE_CONFIG node is not\n");
1181 ret = media_pipeline_start(&node->pad, &cfe->pipe);
1183 cfe_err(cfe, "Failed to start media pipeline: %d\n", ret);
1187 state = v4l2_subdev_lock_and_get_active_state(&cfe->csi2.sd);
1189 clear_state(cfe, FS_INT | FE_INT, node->id);
1190 set_state(cfe, NODE_STREAMING, node->id);
1193 ret = cfe_start_channel(node);
1195 goto err_unlock_state;
1197 if (!test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING)) {
1198 cfe_dbg(cfe, "Streaming on hold, as all nodes are not set to streaming yet\n");
1199 v4l2_subdev_unlock_state(state);
/* All nodes are streaming: bring up the shared hardware now. */
1203 cfg_reg_write(cfe, MIPICFG_CFG, MIPICFG_CFG_SEL_CSI);
1204 cfg_reg_write(cfe, MIPICFG_INTE,
1205 MIPICFG_INT_CSI_DMA | MIPICFG_INT_PISP_FE);
1207 ret = v4l2_subdev_call(cfe->source_sd, pad, get_mbus_config, 0,
1209 if (ret < 0 && ret != -ENOIOCTLCMD) {
1210 cfe_err(cfe, "g_mbus_config failed\n");
1211 goto err_clear_inte;
1214 cfe->csi2.dphy.active_lanes = mbus_config.bus.mipi_csi2.num_data_lanes;
1215 if (!cfe->csi2.dphy.active_lanes)
1216 cfe->csi2.dphy.active_lanes = cfe->csi2.dphy.max_lanes;
1217 if (cfe->csi2.dphy.active_lanes > cfe->csi2.dphy.max_lanes) {
1218 cfe_err(cfe, "Device has requested %u data lanes, which is >%u configured in DT\n",
1219 cfe->csi2.dphy.active_lanes, cfe->csi2.dphy.max_lanes);
1221 goto err_clear_inte;
1224 link_freq = cfe_get_source_link_freq(cfe);
1226 goto err_clear_inte;
/* DDR: two bits per clock cycle, expressed in MHz. */
1228 cfe->csi2.dphy.dphy_rate = div_s64(link_freq * 2, 1000000);
1229 csi2_open_rx(&cfe->csi2);
1231 cfe->streams_mask = 0;
1233 for_each_active_route(&state->routing, route)
1234 cfe->streams_mask |= BIT_ULL(route->sink_stream);
1236 ret = v4l2_subdev_enable_streams(cfe->source_sd, cfe->source_pad,
1239 cfe_err(cfe, "stream on failed in subdev\n");
1240 goto err_disable_cfe;
1243 cfe_dbg(cfe, "Streaming enabled\n");
1245 v4l2_subdev_unlock_state(state);
/* Error unwind: tear down in reverse order of setup. */
1250 csi2_close_rx(&cfe->csi2);
1252 cfg_reg_write(cfe, MIPICFG_INTE, 0);
1254 cfe_stop_channel(node,
1255 is_fe_enabled(cfe) && test_all_nodes(cfe, NODE_ENABLED,
1258 v4l2_subdev_unlock_state(state);
1259 media_pipeline_stop(&node->pad);
1261 pm_runtime_put(&cfe->pdev->dev);
1263 cfe_return_buffers(node, VB2_BUF_STATE_QUEUED);
1264 clear_state(cfe, NODE_STREAMING, node->id);
/*
 * vb2 .stop_streaming. Decides (under state_lock) whether this is the last
 * FE user, stops this node's channel, and — when no node is streaming any
 * more — disables the source streams, closes the CSI-2 RX and masks the
 * interrupts. Queued buffers are returned to vb2 as ERRORed.
 */
1269 static void cfe_stop_streaming(struct vb2_queue *vq)
1271 struct cfe_node *node = vb2_get_drv_priv(vq);
1272 struct cfe_device *cfe = node->cfe;
1273 unsigned long flags;
1276 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1278 spin_lock_irqsave(&cfe->state_lock, flags);
/* fe_stop is computed before clearing our own STREAMING bit. */
1279 fe_stop = is_fe_enabled(cfe) &&
1280 test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING);
1282 cfe->job_ready = false;
1283 clear_state(cfe, NODE_STREAMING, node->id);
1284 spin_unlock_irqrestore(&cfe->state_lock, flags);
1286 cfe_stop_channel(node, fe_stop);
1288 if (!test_any_node(cfe, NODE_STREAMING)) {
1289 struct v4l2_subdev_state *state;
1292 state = v4l2_subdev_lock_and_get_active_state(&cfe->csi2.sd);
1294 ret = v4l2_subdev_disable_streams(cfe->source_sd,
1298 cfe_err(cfe, "stream disable failed in subdev\n");
1300 v4l2_subdev_unlock_state(state);
1302 csi2_close_rx(&cfe->csi2);
1304 cfg_reg_write(cfe, MIPICFG_INTE, 0);
1306 cfe_dbg(cfe, "%s: Streaming disabled\n", __func__);
1309 media_pipeline_stop(&node->pad);
1311 /* Clear all queued buffers for the node */
1312 cfe_return_buffers(node, VB2_BUF_STATE_ERROR);
1314 pm_runtime_put(&cfe->pdev->dev);
/* videobuf2 queue operations shared by all CFE video device nodes. */
1317 static const struct vb2_ops cfe_video_qops = {
1318 .wait_prepare = vb2_ops_wait_prepare,
1319 .wait_finish = vb2_ops_wait_finish,
1320 .queue_setup = cfe_queue_setup,
1321 .buf_prepare = cfe_buffer_prepare,
1322 .buf_queue = cfe_buffer_queue,
1323 .start_streaming = cfe_start_streaming,
1324 .stop_streaming = cfe_stop_streaming,
/*
 * VIDIOC_QUERYCAP: report driver/card names and the union of capabilities
 * across all CFE nodes (per-node caps come from device_caps).
 */
1331 static int cfe_querycap(struct file *file, void *priv,
1332 struct v4l2_capability *cap)
1334 strscpy(cap->driver, CFE_MODULE_NAME, sizeof(cap->driver));
1335 strscpy(cap->card, CFE_MODULE_NAME, sizeof(cap->card));
1337 cap->capabilities |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE |
1338 V4L2_CAP_META_OUTPUT;
/*
 * VIDIOC_ENUM_FMT (video capture): walk the global formats[] table,
 * skipping metadata formats and (for FE nodes) formats the front end
 * cannot output, and return the f->index'th match. Honours the optional
 * f->mbus_code filter used by V4L2_CAP_IO_MC.
 */
1343 static int cfe_enum_fmt_vid_cap(struct file *file, void *priv,
1344 struct v4l2_fmtdesc *f)
1346 struct cfe_node *node = video_drvdata(file);
1347 struct cfe_device *cfe = node->cfe;
1350 if (!node_supports_image_output(node))
1353 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
/* j counts only the entries that survive the filters below. */
1355 for (i = 0, j = 0; i < ARRAY_SIZE(formats); i++) {
1356 if (f->mbus_code && formats[i].code != f->mbus_code)
1359 if (formats[i].flags & CFE_FORMAT_FLAG_META_OUT ||
1360 formats[i].flags & CFE_FORMAT_FLAG_META_CAP)
1363 if (is_fe_node(node) &&
1364 !(formats[i].flags & CFE_FORMAT_FLAG_FE_OUT))
1367 if (j == f->index) {
1368 f->pixelformat = formats[i].fourcc;
1369 f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
/* VIDIOC_G_FMT (video): return the node's cached vid_fmt (body elided here). */
1378 static int cfe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
1380 struct cfe_node *node = video_drvdata(file);
1382 if (!node_supports_image(node))
/*
 * Clamp/normalise a requested video-capture format: fall back to a default
 * pixelformat when the requested one is unknown, remap to the 16-bit
 * variant for FE nodes, force progressive scan, and compute the size and
 * bytesperline. Used by both s_fmt and try_fmt.
 */
1390 static int cfe_validate_fmt_vid_cap(struct cfe_node *node,
1391 struct v4l2_format *f)
1393 struct cfe_device *cfe = node->cfe;
1394 const struct cfe_fmt *fmt;
1396 cfe_dbg(cfe, "%s: [%s] %ux%u, V4L2 pix %p4cc\n", __func__,
1397 node_desc[node->id].name, f->fmt.pix.width, f->fmt.pix.height,
1398 &f->fmt.pix.pixelformat);
1400 if (!node_supports_image_output(node))
1404 * Default to a format that works for both CSI2 and FE.
1406 fmt = find_format_by_pix(f->fmt.pix.pixelformat);
1408 fmt = find_format_by_code(MEDIA_BUS_FMT_SBGGR10_1X10);
1410 f->fmt.pix.pixelformat = fmt->fourcc;
/* The FE writes out 16-bit samples; substitute the remapped fourcc. */
1412 if (is_fe_node(node) && fmt->remap[CFE_REMAP_16BIT]) {
1413 f->fmt.pix.pixelformat = fmt->remap[CFE_REMAP_16BIT];
1414 fmt = find_format_by_pix(f->fmt.pix.pixelformat);
1417 f->fmt.pix.field = V4L2_FIELD_NONE;
1419 cfe_calc_vid_format_size_bpl(cfe, fmt, f);
/*
 * VIDIOC_S_FMT (video capture): validate the request, then store it as the
 * node's active format (the busy-queue check and assignment are in lines
 * elided from this extract).
 */
1424 static int cfe_s_fmt_vid_cap(struct file *file, void *priv,
1425 struct v4l2_format *f)
1427 struct cfe_node *node = video_drvdata(file);
1428 struct cfe_device *cfe = node->cfe;
1429 struct vb2_queue *q = &node->buffer_queue;
1435 ret = cfe_validate_fmt_vid_cap(node, f);
1441 cfe_dbg(cfe, "%s: Set %ux%u, V4L2 pix %p4cc\n", __func__,
1442 node->vid_fmt.fmt.pix.width, node->vid_fmt.fmt.pix.height,
1443 &node->vid_fmt.fmt.pix.pixelformat);
/* VIDIOC_TRY_FMT (video capture): validation only, no state change. */
1448 static int cfe_try_fmt_vid_cap(struct file *file, void *priv,
1449 struct v4l2_format *f)
1451 struct cfe_node *node = video_drvdata(file);
1452 struct cfe_device *cfe = node->cfe;
1454 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1456 return cfe_validate_fmt_vid_cap(node, f);
/*
 * VIDIOC_ENUM_FMT (meta cap/out): CSI-2 channels expose line-based generic
 * metadata formats; the FE stats/config nodes each expose a single fixed
 * format. (switch scaffolding and index checks elided in this extract.)
 */
1459 static int cfe_enum_fmt_meta(struct file *file, void *priv,
1460 struct v4l2_fmtdesc *f)
1462 struct cfe_node *node = video_drvdata(file);
1463 struct cfe_device *cfe = node->cfe;
1465 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1467 if (!node_supports_meta(node))
1471 case CSI2_CH0...CSI2_CH3:
1472 f->flags = V4L2_FMT_FLAG_META_LINE_BASED;
1476 f->pixelformat = V4L2_META_FMT_GENERIC_8;
1479 f->pixelformat = V4L2_META_FMT_GENERIC_CSI2_10;
1482 f->pixelformat = V4L2_META_FMT_GENERIC_CSI2_12;
1496 f->pixelformat = V4L2_META_FMT_RPI_FE_STATS;
1499 f->pixelformat = V4L2_META_FMT_RPI_FE_CFG;
/*
 * Clamp/normalise a requested metadata format. CSI-2 channel nodes accept
 * any META_CAP format from the table (default GENERIC_CSI2_10) and get a
 * computed buffersize/bpl; the FE stats and config nodes have fixed
 * dataformats with buffersize pinned to the corresponding struct size.
 */
1506 static int cfe_validate_fmt_meta(struct cfe_node *node, struct v4l2_format *f)
1508 struct cfe_device *cfe = node->cfe;
1509 const struct cfe_fmt *fmt;
1512 case CSI2_CH0...CSI2_CH3:
1513 cfe_dbg(cfe, "%s: [%s] %ux%u, V4L2 meta %p4cc\n", __func__,
1514 node_desc[node->id].name, f->fmt.meta.width,
1515 f->fmt.meta.height, &f->fmt.meta.dataformat);
1519 cfe_dbg(cfe, "%s: [%s] %u bytes, V4L2 meta %p4cc\n", __func__,
1520 node_desc[node->id].name, f->fmt.meta.buffersize,
1521 &f->fmt.meta.dataformat);
1527 if (!node_supports_meta(node))
1531 case CSI2_CH0...CSI2_CH3:
1532 fmt = find_format_by_pix(f->fmt.meta.dataformat);
1533 if (!fmt || !(fmt->flags & CFE_FORMAT_FLAG_META_CAP))
1534 fmt = find_format_by_pix(V4L2_META_FMT_GENERIC_CSI2_10);
1536 f->fmt.meta.dataformat = fmt->fourcc;
1538 cfe_calc_meta_format_size_bpl(cfe, fmt, f);
/* FE statistics node: fixed format and size. */
1542 f->fmt.meta.dataformat = V4L2_META_FMT_RPI_FE_STATS;
1543 f->fmt.meta.buffersize = sizeof(struct pisp_statistics);
/* FE config node: fixed format and size. */
1546 f->fmt.meta.dataformat = V4L2_META_FMT_RPI_FE_CFG;
1547 f->fmt.meta.buffersize = sizeof(struct pisp_fe_config);
/* VIDIOC_G_FMT (meta): return the node's cached meta_fmt. */
1554 static int cfe_g_fmt_meta(struct file *file, void *priv, struct v4l2_format *f)
1556 struct cfe_node *node = video_drvdata(file);
1557 struct cfe_device *cfe = node->cfe;
1559 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1561 if (!node_supports_meta(node))
1564 *f = node->meta_fmt;
/*
 * VIDIOC_S_FMT (meta): validate, then store as the node's active metadata
 * format (busy-queue check elided in this extract).
 */
1569 static int cfe_s_fmt_meta(struct file *file, void *priv, struct v4l2_format *f)
1571 struct cfe_node *node = video_drvdata(file);
1572 struct cfe_device *cfe = node->cfe;
1573 struct vb2_queue *q = &node->buffer_queue;
1576 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1581 if (!node_supports_meta(node))
1584 ret = cfe_validate_fmt_meta(node, f);
1588 node->meta_fmt = *f;
1590 cfe_dbg(cfe, "%s: Set %p4cc\n", __func__,
1591 &node->meta_fmt.fmt.meta.dataformat);
/* VIDIOC_TRY_FMT (meta): validation only, no state change. */
1596 static int cfe_try_fmt_meta(struct file *file, void *priv,
1597 struct v4l2_format *f)
1599 struct cfe_node *node = video_drvdata(file);
1600 struct cfe_device *cfe = node->cfe;
1602 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1603 return cfe_validate_fmt_meta(node, f);
/*
 * VIDIOC_ENUM_FRAMESIZES: a single stepwise range covering the hardware
 * limits; only index 0 is valid and the pixel format must be known.
 */
1606 static int cfe_enum_framesizes(struct file *file, void *priv,
1607 struct v4l2_frmsizeenum *fsize)
1609 struct cfe_node *node = video_drvdata(file);
1610 struct cfe_device *cfe = node->cfe;
1611 const struct cfe_fmt *fmt;
1613 cfe_dbg(cfe, "%s [%s]\n", __func__, node_desc[node->id].name);
1615 if (fsize->index > 0)
1618 /* check for valid format */
1619 fmt = find_format_by_pix(fsize->pixel_format);
1621 cfe_dbg(cfe, "Invalid pixel code: %x\n", fsize->pixel_format);
1625 /* TODO: Do we have limits on the step_width? */
1627 fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
1628 fsize->stepwise.min_width = MIN_WIDTH;
1629 fsize->stepwise.max_width = MAX_WIDTH;
1630 fsize->stepwise.step_width = 2;
1631 fsize->stepwise.min_height = MIN_HEIGHT;
1632 fsize->stepwise.max_height = MAX_HEIGHT;
1633 fsize->stepwise.step_height = 1;
/*
 * VIDIOC_REQBUFS wrapper: nodes can serve either video or metadata, so
 * switch the vb2 queue type to the requested one before delegating to the
 * stock vb2 handler. Only capture/meta types this driver supports pass.
 */
1638 static int cfe_vb2_ioctl_reqbufs(struct file *file, void *priv,
1639 struct v4l2_requestbuffers *p)
1641 struct video_device *vdev = video_devdata(file);
1642 struct cfe_node *node = video_get_drvdata(vdev);
1643 struct cfe_device *cfe = node->cfe;
1646 cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name,
1649 if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
1650 p->type != V4L2_BUF_TYPE_META_CAPTURE &&
1651 p->type != V4L2_BUF_TYPE_META_OUTPUT)
1654 ret = vb2_queue_change_type(vdev->queue, p->type);
1658 return vb2_ioctl_reqbufs(file, priv, p);
/*
 * VIDIOC_CREATE_BUFS wrapper: same queue-type switching as
 * cfe_vb2_ioctl_reqbufs, keyed off p->format.type.
 */
1661 static int cfe_vb2_ioctl_create_bufs(struct file *file, void *priv,
1662 struct v4l2_create_buffers *p)
1664 struct video_device *vdev = video_devdata(file);
1665 struct cfe_node *node = video_get_drvdata(vdev);
1666 struct cfe_device *cfe = node->cfe;
1669 cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name,
1672 if (p->format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
1673 p->format.type != V4L2_BUF_TYPE_META_CAPTURE &&
1674 p->format.type != V4L2_BUF_TYPE_META_OUTPUT)
1677 ret = vb2_queue_change_type(vdev->queue, p->format.type);
1681 return vb2_ioctl_create_bufs(file, priv, p);
/*
 * VIDIOC_SUBSCRIBE_EVENT: frame-sync events only on image-output nodes
 * (queue depth 2), source-change events on image/meta output nodes
 * (queue depth 4); everything else falls through to control events.
 */
1684 static int cfe_subscribe_event(struct v4l2_fh *fh,
1685 const struct v4l2_event_subscription *sub)
1687 struct cfe_node *node = video_get_drvdata(fh->vdev);
1689 switch (sub->type) {
1690 case V4L2_EVENT_FRAME_SYNC:
1691 if (!node_supports_image_output(node))
1694 return v4l2_event_subscribe(fh, sub, 2, NULL);
1695 case V4L2_EVENT_SOURCE_CHANGE:
1696 if (!node_supports_image_output(node) &&
1697 !node_supports_meta_output(node))
1700 return v4l2_event_subscribe(fh, sub, 4, NULL);
1703 return v4l2_ctrl_subscribe_event(fh, sub);
/*
 * ioctl dispatch table shared by all nodes; cfe_enum_fmt_meta and the
 * g/s/try meta handlers are reused for both meta-capture and meta-output.
 */
1706 static const struct v4l2_ioctl_ops cfe_ioctl_ops = {
1707 .vidioc_querycap = cfe_querycap,
1708 .vidioc_enum_fmt_vid_cap = cfe_enum_fmt_vid_cap,
1709 .vidioc_g_fmt_vid_cap = cfe_g_fmt,
1710 .vidioc_s_fmt_vid_cap = cfe_s_fmt_vid_cap,
1711 .vidioc_try_fmt_vid_cap = cfe_try_fmt_vid_cap,
1713 .vidioc_enum_fmt_meta_cap = cfe_enum_fmt_meta,
1714 .vidioc_g_fmt_meta_cap = cfe_g_fmt_meta,
1715 .vidioc_s_fmt_meta_cap = cfe_s_fmt_meta,
1716 .vidioc_try_fmt_meta_cap = cfe_try_fmt_meta,
1718 .vidioc_enum_fmt_meta_out = cfe_enum_fmt_meta,
1719 .vidioc_g_fmt_meta_out = cfe_g_fmt_meta,
1720 .vidioc_s_fmt_meta_out = cfe_s_fmt_meta,
1721 .vidioc_try_fmt_meta_out = cfe_try_fmt_meta,
1723 .vidioc_enum_framesizes = cfe_enum_framesizes,
1725 .vidioc_reqbufs = cfe_vb2_ioctl_reqbufs,
1726 .vidioc_create_bufs = cfe_vb2_ioctl_create_bufs,
1727 .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1728 .vidioc_querybuf = vb2_ioctl_querybuf,
1729 .vidioc_qbuf = vb2_ioctl_qbuf,
1730 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1731 .vidioc_expbuf = vb2_ioctl_expbuf,
1732 .vidioc_streamon = vb2_ioctl_streamon,
1733 .vidioc_streamoff = vb2_ioctl_streamoff,
1735 .vidioc_subscribe_event = cfe_subscribe_event,
1736 .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
/*
 * v4l2_device notify hook: fan subdev events out to every registered
 * video node's event queue.
 */
1739 static void cfe_notify(struct v4l2_subdev *sd, unsigned int notification,
1742 struct cfe_device *cfe = to_cfe_device(sd->v4l2_dev);
1744 switch (notification) {
1745 case V4L2_DEVICE_NOTIFY_EVENT:
1746 for (unsigned int i = 0; i < NUM_NODES; i++) {
1747 struct cfe_node *node = &cfe->node[i];
1749 if (check_state(cfe, NODE_REGISTERED, i))
1752 v4l2_event_queue(&node->video_dev, arg);
1760 /* cfe capture driver file operations */
1761 static const struct v4l2_file_operations cfe_fops = {
1762 .owner = THIS_MODULE,
1763 .open = v4l2_fh_open,
1764 .release = vb2_fop_release,
1765 .poll = vb2_fop_poll,
1766 .unlocked_ioctl = video_ioctl2,
1767 .mmap = vb2_fop_mmap,
/*
 * media link_validate for node entities: the remote source pad's active
 * format must match this node's configured width/height and a compatible
 * mbus-code/fourcc pairing must exist in the format table. Metadata
 * validation only applies to CSI-2 meta-output nodes; FE stats/config
 * nodes have fixed formats and skip these checks.
 */
1770 static int cfe_video_link_validate(struct media_link *link)
1772 struct video_device *vd = container_of(link->sink->entity,
1773 struct video_device, entity);
1774 struct cfe_node *node = container_of(vd, struct cfe_node, video_dev);
1775 struct cfe_device *cfe = node->cfe;
1776 struct v4l2_mbus_framefmt *source_fmt;
1777 struct v4l2_subdev_state *state;
1778 struct v4l2_subdev *source_sd;
1781 cfe_dbg(cfe, "%s: [%s] link \"%s\":%u -> \"%s\":%u\n", __func__,
1782 node_desc[node->id].name,
1783 link->source->entity->name, link->source->index,
1784 link->sink->entity->name, link->sink->index);
1786 if (!media_entity_remote_source_pad_unique(link->sink->entity)) {
1787 cfe_err(cfe, "video node %s pad not connected\n", vd->name);
1791 source_sd = media_entity_to_v4l2_subdev(link->source->entity);
/* Read the source pad format under the subdev state lock. */
1793 state = v4l2_subdev_lock_and_get_active_state(source_sd);
1795 source_fmt = v4l2_subdev_state_get_format(state, link->source->index);
1801 if (is_image_output_node(node)) {
1802 struct v4l2_pix_format *pix_fmt = &node->vid_fmt.fmt.pix;
1803 const struct cfe_fmt *fmt;
1805 if (source_fmt->width != pix_fmt->width ||
1806 source_fmt->height != pix_fmt->height) {
1807 cfe_err(cfe, "Wrong width or height %ux%u (remote pad set to %ux%u)\n",
1808 pix_fmt->width, pix_fmt->height,
1809 source_fmt->width, source_fmt->height);
1814 fmt = find_format_by_code_and_fourcc(source_fmt->code,
1815 pix_fmt->pixelformat);
1817 cfe_err(cfe, "Format mismatch!\n");
1821 } else if (is_csi2_node(node) && is_meta_output_node(node)) {
1822 struct v4l2_meta_format *meta_fmt = &node->meta_fmt.fmt.meta;
1823 const struct cfe_fmt *fmt;
1825 if (source_fmt->width != meta_fmt->width ||
1826 source_fmt->height != meta_fmt->height) {
1827 cfe_err(cfe, "Wrong width or height %ux%u (remote pad set to %ux%u)\n",
1828 meta_fmt->width, meta_fmt->height,
1829 source_fmt->width, source_fmt->height);
1834 fmt = find_format_by_code_and_fourcc(source_fmt->code,
1835 meta_fmt->dataformat);
1837 cfe_err(cfe, "Format mismatch!\n");
1844 v4l2_subdev_unlock_state(state);
/* Entity ops for the video nodes: only link validation is needed. */
1849 static const struct media_entity_operations cfe_media_entity_ops = {
1850 .link_validate = cfe_video_link_validate,
/*
 * media link_notify (post-link-change): mirror each node's link enable
 * state into the NODE_ENABLED bitmask, and when the changed link is a
 * CSI2 -> FE sink-pad-0 link, record which CSI-2 channel feeds the front
 * end (-1 when no such link is enabled).
 */
1853 static int cfe_video_link_notify(struct media_link *link, u32 flags,
1854 unsigned int notification)
1856 struct media_device *mdev = link->graph_obj.mdev;
1857 struct cfe_device *cfe = container_of(mdev, struct cfe_device, mdev);
1858 struct media_entity *fe = &cfe->fe.sd.entity;
1859 struct media_entity *csi2 = &cfe->csi2.sd.entity;
1860 unsigned long lock_flags;
1862 if (notification != MEDIA_DEV_NOTIFY_POST_LINK_CH)
1865 cfe_dbg(cfe, "%s: %s[%u] -> %s[%u] 0x%x", __func__,
1866 link->source->entity->name, link->source->index,
1867 link->sink->entity->name, link->sink->index, flags);
1869 spin_lock_irqsave(&cfe->state_lock, lock_flags);
/* Track per-node enable state for any link touching a node entity. */
1871 for (unsigned int i = 0; i < NUM_NODES; i++) {
1872 if (link->sink->entity != &cfe->node[i].video_dev.entity &&
1873 link->source->entity != &cfe->node[i].video_dev.entity)
1876 if (link->flags & MEDIA_LNK_FL_ENABLED)
1877 set_state(cfe, NODE_ENABLED, i);
1879 clear_state(cfe, NODE_ENABLED, i);
1884 spin_unlock_irqrestore(&cfe->state_lock, lock_flags);
/* Only CSI2 -> FE:0 links affect the FE channel selection below. */
1886 if (link->source->entity != csi2)
1888 if (link->sink->entity != fe)
1890 if (link->sink->index != 0)
1893 cfe->fe_csi2_channel = -1;
1894 if (link->flags & MEDIA_LNK_FL_ENABLED) {
1895 if (link->source->index == node_desc[CSI2_CH0].link_pad)
1896 cfe->fe_csi2_channel = CSI2_CH0;
1897 else if (link->source->index == node_desc[CSI2_CH1].link_pad)
1898 cfe->fe_csi2_channel = CSI2_CH1;
1899 else if (link->source->index == node_desc[CSI2_CH2].link_pad)
1900 cfe->fe_csi2_channel = CSI2_CH2;
1901 else if (link->source->index == node_desc[CSI2_CH3].link_pad)
1902 cfe->fe_csi2_channel = CSI2_CH3;
1905 if (is_fe_enabled(cfe))
1906 cfe_dbg(cfe, "%s: Found CSI2:%d -> FE:0 link\n", __func__,
1907 cfe->fe_csi2_channel);
1909 cfe_dbg(cfe, "%s: Unable to find CSI2:x -> FE:0 link\n",
/* Media device ops: only link change notification is used. */
1915 static const struct media_device_ops cfe_media_device_ops = {
1916 .link_notify = cfe_video_link_notify,
/* Final kref release: clean up the media device (and free cfe, elided). */
1919 static void cfe_release(struct kref *kref)
1921 struct cfe_device *cfe = container_of(kref, struct cfe_device, kref);
1923 media_device_cleanup(&cfe->mdev);
/* Drop a reference on the device; cfe_release() runs on the last put. */
1928 static void cfe_put(struct cfe_device *cfe)
1930 kref_put(&cfe->kref, cfe_release);
/* Take a reference on the device. */
1933 static void cfe_get(struct cfe_device *cfe)
1935 kref_get(&cfe->kref);
/* video_device release: drops the cfe reference taken at registration. */
1938 static void cfe_node_release(struct video_device *vdev)
1940 struct cfe_node *node = video_get_drvdata(vdev);
/*
 * Initialise and register one video node (CSI-2 channel, FE output, FE
 * stats or FE config): seed default image/meta formats, set up the vb2
 * queue and the video_device, register it, and mark NODE_REGISTERED.
 */
1945 static int cfe_register_node(struct cfe_device *cfe, int id)
1947 struct video_device *vdev;
1948 const struct cfe_fmt *fmt;
1949 struct vb2_queue *q;
1950 struct cfe_node *node = &cfe->node[id];
/* Seed a default video format for nodes that carry image data. */
1956 if (node_supports_image(node)) {
1957 if (node_supports_image_output(node))
1958 node->vid_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1960 node->vid_fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
1962 fmt = find_format_by_code(cfe_default_format.code);
1964 cfe_err(cfe, "Failed to find format code\n");
1968 node->vid_fmt.fmt.pix.pixelformat = fmt->fourcc;
1969 v4l2_fill_pix_format(&node->vid_fmt.fmt.pix,
1970 &cfe_default_format);
1972 ret = cfe_validate_fmt_vid_cap(node, &node->vid_fmt);
/* Seed a default metadata format for nodes that carry metadata. */
1977 if (node_supports_meta(node)) {
1978 if (node_supports_meta_output(node))
1979 node->meta_fmt.type = V4L2_BUF_TYPE_META_CAPTURE;
1981 node->meta_fmt.type = V4L2_BUF_TYPE_META_OUTPUT;
1983 ret = cfe_validate_fmt_meta(node, &node->meta_fmt);
1988 mutex_init(&node->lock);
/* vb2 queue setup; FE_CONFIG buffers embed the config payload. */
1990 q = &node->buffer_queue;
1991 q->type = node_supports_image(node) ? node->vid_fmt.type :
1992 node->meta_fmt.type;
1993 q->io_modes = VB2_MMAP | VB2_DMABUF;
1995 q->ops = &cfe_video_qops;
1996 q->mem_ops = &vb2_dma_contig_memops;
1997 q->buf_struct_size = id == FE_CONFIG ? sizeof(struct cfe_config_buffer)
1998 : sizeof(struct cfe_buffer);
1999 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
2000 q->lock = &node->lock;
2001 q->min_queued_buffers = 1;
2002 q->dev = &cfe->pdev->dev;
2004 ret = vb2_queue_init(q);
2006 cfe_err(cfe, "vb2_queue_init() failed\n");
2010 INIT_LIST_HEAD(&node->dma_queue);
/* video_device setup: direction follows the node's pad direction. */
2012 vdev = &node->video_dev;
2013 vdev->release = cfe_node_release;
2014 vdev->fops = &cfe_fops;
2015 vdev->ioctl_ops = &cfe_ioctl_ops;
2016 vdev->entity.ops = &cfe_media_entity_ops;
2017 vdev->v4l2_dev = &cfe->v4l2_dev;
2018 vdev->vfl_dir = (node_supports_image_output(node) ||
2019 node_supports_meta_output(node)) ?
2023 vdev->lock = &node->lock;
2024 vdev->device_caps = node_desc[id].caps;
2025 vdev->device_caps |= V4L2_CAP_STREAMING | V4L2_CAP_IO_MC;
2027 /* Define the device names */
2028 snprintf(vdev->name, sizeof(vdev->name), "%s-%s", CFE_MODULE_NAME,
2029 node_desc[id].name);
2031 video_set_drvdata(vdev, node);
2032 node->pad.flags = node_desc[id].pad_flags;
2033 media_entity_pads_init(&vdev->entity, 1, &node->pad);
/* Frame size/interval enumeration is meaningless for meta-only nodes. */
2035 if (!node_supports_image(node)) {
2036 v4l2_disable_ioctl(&node->video_dev,
2037 VIDIOC_ENUM_FRAMEINTERVALS);
2038 v4l2_disable_ioctl(&node->video_dev, VIDIOC_ENUM_FRAMESIZES);
2041 ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
2043 cfe_err(cfe, "Unable to register video device %s\n",
2048 cfe_info(cfe, "Registered [%s] node id %d as /dev/video%u\n",
2049 vdev->name, id, vdev->num);
2052 * Acquire a reference to cfe, which will be released when the video
2053 * device will be unregistered and userspace will have closed all open
2057 set_state(cfe, NODE_REGISTERED, id);
/* Unregister every node previously marked NODE_REGISTERED. */
2062 static void cfe_unregister_nodes(struct cfe_device *cfe)
2064 for (unsigned int i = 0; i < NUM_NODES; i++) {
2065 struct cfe_node *node = &cfe->node[i];
2067 if (check_state(cfe, NODE_REGISTERED, i)) {
2068 clear_state(cfe, NODE_REGISTERED, i);
2069 video_unregister_device(&node->video_dev);
/*
 * Build the media graph: source subdev -> CSI-2 sink, CSI-2 channels ->
 * their video nodes (and FE input for image-capable channels), then FE
 * pads <-> the FE stats/config/output nodes according to pad direction.
 */
2074 static int cfe_link_node_pads(struct cfe_device *cfe)
2076 struct media_pad *remote_pad;
2079 /* Source -> CSI2 */
2081 ret = v4l2_create_fwnode_links_to_pad(cfe->source_sd,
2082 &cfe->csi2.pad[CSI2_PAD_SINK],
2083 MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
2086 cfe_err(cfe, "Failed to create links to the source: %d\n", ret);
/* Remember which source pad feeds the CSI-2 sink. */
2090 remote_pad = media_pad_remote_pad_unique(&cfe->csi2.pad[CSI2_PAD_SINK]);
2091 if (IS_ERR(remote_pad)) {
2092 ret = PTR_ERR(remote_pad);
2093 cfe_err(cfe, "Failed to get unique remote source pad: %d\n",
2098 cfe->source_pad = remote_pad->index;
2100 for (unsigned int i = 0; i < CSI2_NUM_CHANNELS; i++) {
2101 struct cfe_node *node = &cfe->node[i];
2103 if (!check_state(cfe, NODE_REGISTERED, i))
2106 /* CSI2 channel # -> /dev/video# */
2107 ret = media_create_pad_link(&cfe->csi2.sd.entity,
2108 node_desc[i].link_pad,
2109 &node->video_dev.entity, 0, 0);
2113 if (node_supports_image(node)) {
2114 /* CSI2 channel # -> FE Input */
2115 ret = media_create_pad_link(&cfe->csi2.sd.entity,
2116 node_desc[i].link_pad,
/* FE-side nodes: pad direction decides which end is source vs sink. */
2124 for (unsigned int i = CSI2_NUM_CHANNELS; i < NUM_NODES; i++) {
2125 struct cfe_node *node = &cfe->node[i];
2126 struct media_entity *src, *dst;
2127 unsigned int src_pad, dst_pad;
2129 if (node_desc[i].pad_flags & MEDIA_PAD_FL_SINK) {
2130 /* FE -> /dev/video# */
2131 src = &cfe->fe.sd.entity;
2132 src_pad = node_desc[i].link_pad;
2133 dst = &node->video_dev.entity;
2136 /* /dev/video# -> FE */
2137 dst = &cfe->fe.sd.entity;
2138 dst_pad = node_desc[i].link_pad;
2139 src = &node->video_dev.entity;
2143 ret = media_create_pad_link(src, src_pad, dst, dst_pad, 0);
/*
 * Called once the async source subdev has bound: register all video
 * nodes, wire up the media links and create the subdev device nodes.
 * On failure everything registered so far is unwound.
 */
2151 static int cfe_probe_complete(struct cfe_device *cfe)
2155 cfe->v4l2_dev.notify = cfe_notify;
2157 for (unsigned int i = 0; i < NUM_NODES; i++) {
2158 ret = cfe_register_node(cfe, i);
2160 cfe_err(cfe, "Unable to register video node %u.\n", i);
2165 ret = cfe_link_node_pads(cfe);
2167 cfe_err(cfe, "Unable to link node pads.\n");
2171 ret = v4l2_device_register_subdev_nodes(&cfe->v4l2_dev);
2173 cfe_err(cfe, "Unable to register subdev nodes.\n");
2180 cfe_unregister_nodes(cfe);
/*
 * Async notifier "bound": record the single capture source subdev;
 * reject any second source.
 */
2184 static int cfe_async_bound(struct v4l2_async_notifier *notifier,
2185 struct v4l2_subdev *subdev,
2186 struct v4l2_async_connection *asd)
2188 struct cfe_device *cfe = to_cfe_device(notifier->v4l2_dev);
2190 if (cfe->source_sd) {
2191 cfe_err(cfe, "Rejecting subdev %s (Already set!!)",
2196 cfe->source_sd = subdev;
2198 cfe_dbg(cfe, "Using source %s for capture\n", subdev->name);
/* Async notifier "complete": finish probe now that the source is bound. */
2203 static int cfe_async_complete(struct v4l2_async_notifier *notifier)
2205 struct cfe_device *cfe = to_cfe_device(notifier->v4l2_dev);
2207 return cfe_probe_complete(cfe);
/* Async notifier callbacks. */
2210 static const struct v4l2_async_notifier_operations cfe_async_ops = {
2211 .bound = cfe_async_bound,
2212 .complete = cfe_async_complete,
/*
 * Parse the local CSI-2 D-PHY endpoint from firmware (rejecting lane
 * reordering), record lane count and bus flags, then register an async
 * notifier for the remote sensor endpoint.
 */
2215 static int cfe_register_async_nf(struct cfe_device *cfe)
2217 struct platform_device *pdev = cfe->pdev;
2218 struct v4l2_fwnode_endpoint ep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
2219 struct fwnode_handle *local_ep_fwnode;
2220 struct v4l2_async_connection *asd;
2223 local_ep_fwnode = fwnode_graph_get_endpoint_by_id(pdev->dev.fwnode, 0,
2225 if (!local_ep_fwnode) {
2226 cfe_err(cfe, "Failed to find local endpoint fwnode\n");
2230 /* Parse the local endpoint and validate its configuration. */
2231 ret = v4l2_fwnode_endpoint_parse(local_ep_fwnode, &ep);
2233 cfe_err(cfe, "Failed to find remote endpoint fwnode\n");
2234 goto err_put_local_fwnode;
/* The PHY only supports the identity lane mapping (1..N in order). */
2237 for (unsigned int lane = 0; lane < ep.bus.mipi_csi2.num_data_lanes;
2239 if (ep.bus.mipi_csi2.data_lanes[lane] != lane + 1) {
2240 cfe_err(cfe, "Data lanes reordering not supported\n");
2242 goto err_put_local_fwnode;
2246 cfe->csi2.dphy.max_lanes = ep.bus.mipi_csi2.num_data_lanes;
2247 cfe->csi2.bus_flags = ep.bus.mipi_csi2.flags;
2249 /* Initialize and register the async notifier. */
2250 v4l2_async_nf_init(&cfe->notifier, &cfe->v4l2_dev);
2251 cfe->notifier.ops = &cfe_async_ops;
2253 asd = v4l2_async_nf_add_fwnode_remote(&cfe->notifier, local_ep_fwnode,
2254 struct v4l2_async_connection);
2257 cfe_err(cfe, "Error adding subdevice: %d\n", ret);
2258 goto err_put_local_fwnode;
2261 ret = v4l2_async_nf_register(&cfe->notifier);
2263 cfe_err(cfe, "Error registering async notifier: %d\n", ret);
2264 goto err_nf_cleanup;
2267 fwnode_handle_put(local_ep_fwnode);
2272 v4l2_async_nf_cleanup(&cfe->notifier);
2273 err_put_local_fwnode:
2274 fwnode_handle_put(local_ep_fwnode);
/*
 * Platform probe: map the four register blocks (CSI-2 DMA, D-PHY, MIPI
 * cfg, PISP FE), request the IRQ, set up DMA masks, clock, media/V4L2
 * devices and debugfs, init the csi2 and pisp_fe sub-blocks under a
 * runtime-PM reference, then register the media device and the async
 * notifier. Goto-based unwind mirrors the acquisition order.
 */
2279 static int cfe_probe(struct platform_device *pdev)
2281 struct cfe_device *cfe;
2282 char debugfs_name[32];
2285 cfe = kzalloc(sizeof(*cfe), GFP_KERNEL);
2289 platform_set_drvdata(pdev, cfe);
2291 kref_init(&cfe->kref);
/* No CSI2 -> FE link selected until the media graph says otherwise. */
2293 cfe->fe_csi2_channel = -1;
2294 spin_lock_init(&cfe->state_lock);
2296 cfe->csi2.base = devm_platform_ioremap_resource(pdev, 0);
2297 if (IS_ERR(cfe->csi2.base)) {
2298 dev_err(&pdev->dev, "Failed to get dma io block\n");
2299 ret = PTR_ERR(cfe->csi2.base);
2303 cfe->csi2.dphy.base = devm_platform_ioremap_resource(pdev, 1);
2304 if (IS_ERR(cfe->csi2.dphy.base)) {
2305 dev_err(&pdev->dev, "Failed to get host io block\n");
2306 ret = PTR_ERR(cfe->csi2.dphy.base);
2310 cfe->mipi_cfg_base = devm_platform_ioremap_resource(pdev, 2);
2311 if (IS_ERR(cfe->mipi_cfg_base)) {
2312 dev_err(&pdev->dev, "Failed to get mipi cfg io block\n");
2313 ret = PTR_ERR(cfe->mipi_cfg_base);
2317 cfe->fe.base = devm_platform_ioremap_resource(pdev, 3);
2318 if (IS_ERR(cfe->fe.base)) {
2319 dev_err(&pdev->dev, "Failed to get pisp fe io block\n");
2320 ret = PTR_ERR(cfe->fe.base);
/* platform_get_irq() returns the IRQ number (or a negative errno). */
2324 ret = platform_get_irq(pdev, 0);
2330 ret = devm_request_irq(&pdev->dev, ret, cfe_isr, 0, "rp1-cfe", cfe);
2332 dev_err(&pdev->dev, "Unable to request interrupt\n");
2337 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2339 dev_err(&pdev->dev, "DMA enable failed\n");
2343 ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, UINT_MAX);
2347 /* TODO: Enable clock only when running. */
2348 cfe->clk = devm_clk_get(&pdev->dev, NULL);
2349 if (IS_ERR(cfe->clk)) {
2350 ret = dev_err_probe(&pdev->dev, PTR_ERR(cfe->clk),
2351 "clock not found\n");
2355 cfe->mdev.dev = &pdev->dev;
2356 cfe->mdev.ops = &cfe_media_device_ops;
2357 strscpy(cfe->mdev.model, CFE_MODULE_NAME, sizeof(cfe->mdev.model));
2358 strscpy(cfe->mdev.serial, "", sizeof(cfe->mdev.serial));
2359 snprintf(cfe->mdev.bus_info, sizeof(cfe->mdev.bus_info), "platform:%s",
2360 dev_name(&pdev->dev));
2362 media_device_init(&cfe->mdev);
2364 cfe->v4l2_dev.mdev = &cfe->mdev;
2366 ret = v4l2_device_register(&pdev->dev, &cfe->v4l2_dev);
2368 cfe_err(cfe, "Unable to register v4l2 device.\n");
2372 snprintf(debugfs_name, sizeof(debugfs_name), "rp1-cfe:%s",
2373 dev_name(&pdev->dev));
2374 cfe->debugfs = debugfs_create_dir(debugfs_name, NULL);
2375 debugfs_create_file("regs", 0440, cfe->debugfs, cfe,
2376 &mipi_cfg_regs_fops);
2378 /* Enable the block power domain */
2379 pm_runtime_enable(&pdev->dev);
/* Hold the device powered while initialising the hardware sub-blocks. */
2381 ret = pm_runtime_resume_and_get(&cfe->pdev->dev);
2383 goto err_runtime_disable;
2385 cfe->csi2.v4l2_dev = &cfe->v4l2_dev;
2386 ret = csi2_init(&cfe->csi2, cfe->debugfs);
2388 cfe_err(cfe, "Failed to init csi2 (%d)\n", ret);
2389 goto err_runtime_put;
2392 cfe->fe.v4l2_dev = &cfe->v4l2_dev;
2393 ret = pisp_fe_init(&cfe->fe, cfe->debugfs);
2395 cfe_err(cfe, "Failed to init pisp fe (%d)\n", ret);
2396 goto err_csi2_uninit;
2399 cfe->mdev.hw_revision = cfe->fe.hw_revision;
2400 ret = media_device_register(&cfe->mdev);
2402 cfe_err(cfe, "Unable to register media-controller device.\n");
2403 goto err_pisp_fe_uninit;
2406 ret = cfe_register_async_nf(cfe);
2408 cfe_err(cfe, "Failed to connect subdevs\n");
2409 goto err_media_unregister;
2412 pm_runtime_put(&cfe->pdev->dev);
2416 err_media_unregister:
2417 media_device_unregister(&cfe->mdev);
2419 pisp_fe_uninit(&cfe->fe);
2421 csi2_uninit(&cfe->csi2);
2423 pm_runtime_put(&cfe->pdev->dev);
2424 err_runtime_disable:
2425 pm_runtime_disable(&pdev->dev);
2426 debugfs_remove(cfe->debugfs);
2427 v4l2_device_unregister(&cfe->v4l2_dev);
/* Platform remove: unwind probe in reverse order (final cfe_put elided). */
2434 static void cfe_remove(struct platform_device *pdev)
2436 struct cfe_device *cfe = platform_get_drvdata(pdev);
2438 debugfs_remove(cfe->debugfs);
2440 v4l2_async_nf_unregister(&cfe->notifier);
2441 v4l2_async_nf_cleanup(&cfe->notifier);
2443 media_device_unregister(&cfe->mdev);
2444 cfe_unregister_nodes(cfe);
2446 pisp_fe_uninit(&cfe->fe);
2447 csi2_uninit(&cfe->csi2);
2449 pm_runtime_disable(&pdev->dev);
2451 v4l2_device_unregister(&cfe->v4l2_dev);
/* Runtime suspend: gate the block clock. */
2456 static int cfe_runtime_suspend(struct device *dev)
2458 struct platform_device *pdev = to_platform_device(dev);
2459 struct cfe_device *cfe = platform_get_drvdata(pdev);
2461 clk_disable_unprepare(cfe->clk);
/* Runtime resume: re-enable the block clock. */
2466 static int cfe_runtime_resume(struct device *dev)
2468 struct platform_device *pdev = to_platform_device(dev);
2469 struct cfe_device *cfe = platform_get_drvdata(pdev);
2472 ret = clk_prepare_enable(cfe->clk);
2474 dev_err(dev, "Unable to enable clock\n");
/* Runtime PM plus late system sleep forced through the runtime hooks. */
2481 static const struct dev_pm_ops cfe_pm_ops = {
2482 SET_RUNTIME_PM_OPS(cfe_runtime_suspend, cfe_runtime_resume, NULL)
2483 SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
2484 pm_runtime_force_resume)
/* Devicetree match table. */
2487 static const struct of_device_id cfe_of_match[] = {
2488 { .compatible = "raspberrypi,rp1-cfe" },
2491 MODULE_DEVICE_TABLE(of, cfe_of_match);
/* Platform driver glue (probe pointer and pm ops elided in this extract). */
2493 static struct platform_driver cfe_driver = {
2495 .remove = cfe_remove,
2497 .name = CFE_MODULE_NAME,
2498 .of_match_table = cfe_of_match,
2507 MODULE_DESCRIPTION("Raspberry Pi RP1 Camera Front End driver");
2508 MODULE_LICENSE("GPL");
2509 MODULE_VERSION(CFE_VERSION);