1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2017,2020 Intel Corporation
5 * Based partially on Intel IPU4 driver written by
14 #include <linux/bitops.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/iopoll.h>
19 #include <linux/module.h>
20 #include <linux/pci.h>
21 #include <linux/pfn.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/property.h>
24 #include <linux/vmalloc.h>
25 #include <media/v4l2-ctrls.h>
26 #include <media/v4l2-device.h>
27 #include <media/v4l2-event.h>
28 #include <media/v4l2-fwnode.h>
29 #include <media/v4l2-ioctl.h>
30 #include <media/videobuf2-dma-sg.h>
32 #include "ipu3-cio2.h"
34 struct ipu3_cio2_fmt {
42 * These are the raw formats used in Intel's third generation of
43 * Image Processing Unit, known as IPU3.
44 * 10-bit raw Bayer packed: 32 bytes hold every 25 pixels, with the
45 * last 6 bits left unused as padding.
47 static const struct ipu3_cio2_fmt formats[] = {
48 { /* put default entry at beginning */
49 .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
50 .fourcc = V4L2_PIX_FMT_IPU3_SGRBG10,
54 .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
55 .fourcc = V4L2_PIX_FMT_IPU3_SGBRG10,
59 .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
60 .fourcc = V4L2_PIX_FMT_IPU3_SBGGR10,
64 .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
65 .fourcc = V4L2_PIX_FMT_IPU3_SRGGB10,
69 .mbus_code = MEDIA_BUS_FMT_Y10_1X10,
70 .fourcc = V4L2_PIX_FMT_IPU3_Y10,
77 * cio2_find_format - lookup color format by fourcc and/or media bus code
78 * @pixelformat: fourcc to match, ignored if null
79 * @mbus_code: media bus code to match, ignored if null
81 static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
86 for (i = 0; i < ARRAY_SIZE(formats); i++) {
87 if (pixelformat && *pixelformat != formats[i].fourcc)
89 if (mbus_code && *mbus_code != formats[i].mbus_code)
98 static inline u32 cio2_bytesperline(const unsigned int width)
101 * 64 bytes for every 50 pixels, so the line length
102 * in bytes is a multiple of 64 (line end alignment).
104 return DIV_ROUND_UP(width, 50) * 64;
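/*
 * Worked example (illustrative): the packing stores 25 10-bit pixels in
 * 32 bytes, i.e. 50 pixels in 64 bytes.  For a 1936-pixel wide line (the
 * driver's default width, used here only as an example):
 *
 *	DIV_ROUND_UP(1936, 50) * 64 = 39 * 64 = 2496 bytes per line
 *
 * Rounding up also gives the 64-byte line end alignment the hardware needs.
 */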
107 /**************** FBPT operations ****************/
109 static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
111 struct device *dev = &cio2->pci_dev->dev;
113 if (cio2->dummy_lop) {
114 dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_lop,
115 cio2->dummy_lop_bus_addr);
116 cio2->dummy_lop = NULL;
118 if (cio2->dummy_page) {
119 dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
120 cio2->dummy_page_bus_addr);
121 cio2->dummy_page = NULL;
125 static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
127 struct device *dev = &cio2->pci_dev->dev;
130 cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
131 &cio2->dummy_page_bus_addr,
133 cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
134 &cio2->dummy_lop_bus_addr,
136 if (!cio2->dummy_page || !cio2->dummy_lop) {
137 cio2_fbpt_exit_dummy(cio2);
141 * A List of Pointers (LOP) contains 1024 x 32-bit pointers, each to a 4 KiB page.
142 * Initialize every entry to the dummy_page bus base address.
144 for (i = 0; i < CIO2_LOP_ENTRIES; i++)
145 cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
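/*
 * Size arithmetic, for illustration (assuming 4 KiB pages): one LOP page
 * holds CIO2_LOP_ENTRIES (1024) page-frame numbers, so a single LOP maps
 * 1024 * 4 KiB = 4 MiB of buffer memory, and an FBPT entry with its
 * CIO2_MAX_LOPS LOP pointers bounds the size of one frame buffer to
 * CIO2_MAX_LOPS * 4 MiB.
 */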
150 static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
151 struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
154 * The CPU first initializes some fields in fbpt, then sets
155 * the VALID bit, this barrier is to ensure that the DMA(device)
156 * does not see the VALID bit enabled before other fields are
157 * initialized; otherwise it could lead to havoc.
162 * Request interrupts for start and completion
163 * Valid bit is applicable only to 1st entry
165 entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
166 CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
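/*
 * The ordering being relied on, as an illustrative sketch only (dma_wmb()
 * is the barrier referred to in the comment above):
 *
 *	entry[1].second_entry.num_of_pages = ...;   // fill the entry first
 *	dma_wmb();                                  // order the writes
 *	entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID | ...;
 *
 * so the device can never observe VALID before the rest of the entry has
 * been written out.
 */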
169 /* Initialize fbpt entries to point to dummy frame */
170 static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
171 struct cio2_fbpt_entry
172 entry[CIO2_MAX_LOPS])
176 entry[0].first_entry.first_page_offset = 0;
177 entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
178 entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
180 for (i = 0; i < CIO2_MAX_LOPS; i++)
181 entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
183 cio2_fbpt_entry_enable(cio2, entry);
186 /* Initialize fbpt entries to point to a given buffer */
187 static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
188 struct cio2_buffer *b,
189 struct cio2_fbpt_entry
190 entry[CIO2_MAX_LOPS])
192 struct vb2_buffer *vb = &b->vbb.vb2_buf;
193 unsigned int length = vb->planes[0].length;
196 entry[0].first_entry.first_page_offset = b->offset;
197 remaining = length + entry[0].first_entry.first_page_offset;
198 entry[1].second_entry.num_of_pages = PFN_UP(remaining);
200 * last_page_available_bytes has the offset of the last byte in the
201 * last page which is still accessible by DMA. DMA cannot access
202 * beyond this point. Valid range for this is from 0 to 4095.
203 * 0 indicates 1st byte in the page is DMA accessible.
204 * 4095 (PAGE_SIZE - 1) means every single byte in the last page
205 * is available for DMA transfer.
207 remaining = offset_in_page(remaining) ?: PAGE_SIZE;
208 entry[1].second_entry.last_page_available_bytes = remaining - 1;
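/*
 * Worked example (hypothetical plane size, 4 KiB pages): a 10000-byte
 * plane starting at first_page_offset 0 gives remaining = 10000, so
 * num_of_pages = PFN_UP(10000) = 3.  offset_in_page(10000) = 1808, hence
 * last_page_available_bytes = 1807: bytes 0..1807 of the last page are
 * DMA accessible, and 2 * 4096 + 1808 = 10000.
 */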
212 while (remaining > 0) {
213 entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
214 remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
220 * The first unused FBPT entry must still point to a valid LOP
222 entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
224 cio2_fbpt_entry_enable(cio2, entry);
227 static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
229 struct device *dev = &cio2->pci_dev->dev;
231 q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
239 static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
241 dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
244 /**************** CSI2 hardware setup ****************/
247 * The CSI2 receiver has several parameters affecting
248 * the receiver timings. These depend on the MIPI bus frequency
249 * F in Hz (sensor transmitter rate) as follows:
250 * register value = (A/1e9 + B * UI) / COUNT_ACC
252 * UI = 1 / (2 * F) in seconds
253 * COUNT_ACC = counter accuracy in seconds
254 * For IPU3, COUNT_ACC = 0.0625 ns (i.e. 0.0625e-9 s)
256 * A and B are coefficients from the table below,
257 * depending on whether the register minimum or maximum value is calculated (columns: min A, min B, max A, max B).
261 * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0
262 * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16
264 * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4
265 * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6
266 * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4
267 * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6
268 * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4
269 * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6
270 * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4
271 * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6
273 * We use the minimum values of both A and B.
277 * shift for keeping value range suitable for 32-bit integer arithmetic
279 #define LIMIT_SHIFT 8
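/*
 * Worked example of the formula above, for an assumed 400 MHz link
 * frequency: UI = 1 / (2 * 400e6) = 1.25 ns.  For the clock lane settle
 * count with the minimum coefficients A = 95, B = -8:
 *
 *	(95 ns - 8 * 1.25 ns) / 0.0625 ns = (95 - 10) * 16 = 1360
 *
 * cio2_rx_timing() below reaches the same value in integer arithmetic:
 * accinv * a + accinv * b * uiinv / freq = 16 * 95 - 16 * 8 * 5e8 / 4e8
 * = 1520 - 160 = 1360.
 */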
281 static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
283 const u32 accinv = 16; /* inverse of the counter resolution */
284 const u32 uiinv = 500000000; /* 1e9 / 2 */
287 freq >>= LIMIT_SHIFT;
289 if (WARN_ON(freq <= 0 || freq > S32_MAX))
292 * b could be 0, -2 or -8, so |accinv * b| is always
293 * less than (1 << LIMIT_SHIFT) and thus |r| < 500000000.
295 r = accinv * b * (uiinv >> LIMIT_SHIFT);
297 /* max value of a is 95 */
303 /* Calculate the HS Rx termination enable and settle delay values for the clock and data lanes */
304 static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
305 struct cio2_csi2_timing *timing,
306 unsigned int bpp, unsigned int lanes)
308 struct device *dev = &cio2->pci_dev->dev;
314 freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
316 dev_err(dev, "error %lld, invalid link_freq\n", freq);
320 timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
321 CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
323 CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
324 timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
325 CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
327 CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
328 timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
329 CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
331 CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
332 timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
333 CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
335 CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
337 dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
338 dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
339 dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
340 dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
345 static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
347 static const int NUM_VCS = 4;
348 static const int SID; /* Stream id */
349 static const int ENTRY;
350 static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
351 CIO2_FBPT_SUBENTRY_UNIT);
352 const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
353 const struct ipu3_cio2_fmt *fmt;
354 void __iomem *const base = cio2->base;
355 u8 lanes, csi2bus = q->csi2.port;
356 u8 sensor_vc = SENSOR_VIR_CH_DFLT;
357 struct cio2_csi2_timing timing;
360 fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
364 lanes = q->csi2.lanes;
366 r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
370 writel(timing.clk_termen, q->csi_rx_base +
371 CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
372 writel(timing.clk_settle, q->csi_rx_base +
373 CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
375 for (i = 0; i < lanes; i++) {
376 writel(timing.dat_termen, q->csi_rx_base +
377 CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
378 writel(timing.dat_settle, q->csi_rx_base +
379 CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
382 writel(CIO2_PBM_WMCTRL1_MIN_2CK |
383 CIO2_PBM_WMCTRL1_MID1_2CK |
384 CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
385 writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
386 CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
387 CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
388 CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
389 CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
390 CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
391 writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
392 CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
393 CIO2_PBM_ARB_CTRL_LE_EN |
394 CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
395 CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
396 CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
397 CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
398 base + CIO2_REG_PBM_ARB_CTRL);
399 writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
400 q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
401 writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
402 q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);
404 writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
405 writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);
407 /* Configure MIPI backend */
408 for (i = 0; i < NUM_VCS; i++)
409 writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
411 /* There are 16 short packet LUT entries */
412 for (i = 0; i < 16; i++)
413 writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
414 q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
415 writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
416 q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
418 writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
419 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
420 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
421 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
422 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
423 writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);
425 writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
426 CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
427 base + CIO2_REG_INT_EN);
429 writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
430 << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
431 base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
432 writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
433 sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
434 fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
435 q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
436 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
437 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
438 writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));
440 writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
441 writel(CIO2_CGC_PRIM_TGE |
445 CIO2_CGC_CSI2_INTERFRAME_TGE |
446 CIO2_CGC_CSI2_PORT_DCGE |
451 CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
452 CIO2_CGC_CSI_CLKGATE_HOLDOFF
453 << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
454 writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
455 writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
456 CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
457 CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
458 CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
459 base + CIO2_REG_LTRVAL01);
460 writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
461 CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
462 CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
463 CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
464 base + CIO2_REG_LTRVAL23);
466 for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
467 writel(0, base + CIO2_REG_CDMABA(i));
468 writel(0, base + CIO2_REG_CDMAC0(i));
469 writel(0, base + CIO2_REG_CDMAC1(i));
473 writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
475 writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
476 FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
477 CIO2_CDMAC0_DMA_INTR_ON_FE |
478 CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
480 CIO2_CDMAC0_DMA_INTR_ON_FS |
481 CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
483 writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
484 base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));
486 writel(0, base + CIO2_REG_PBM_FOPN_ABORT);
488 writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
489 CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
490 CIO2_PXM_FRF_CFG_MSK_ECC_RE |
491 CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
492 base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));
494 /* Clear interrupts */
495 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
496 writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
497 writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
498 writel(~0, base + CIO2_REG_INT_STS);
500 /* Enable devices, starting from the last device in the pipe */
501 writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
502 writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
507 static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
509 struct device *dev = &cio2->pci_dev->dev;
510 void __iomem *const base = cio2->base;
515 /* Disable CSI receiver and MIPI backend devices */
516 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
517 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
518 writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
519 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
522 writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
523 ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
524 value, value & CIO2_CDMAC0_DMA_HALTED,
527 dev_err(dev, "DMA %i can not be halted\n", CIO2_DMA_CHAN);
529 for (i = 0; i < CIO2_NUM_PORTS; i++) {
530 writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
531 CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
532 writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
533 CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
537 static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
539 struct device *dev = &cio2->pci_dev->dev;
540 struct cio2_queue *q = cio2->cur_queue;
541 struct cio2_fbpt_entry *entry;
542 u64 ns = ktime_get_ns();
544 if (dma_chan >= CIO2_QUEUES) {
545 dev_err(dev, "bad DMA channel %i\n", dma_chan);
549 entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
550 if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
551 dev_warn(dev, "no ready buffers found on DMA channel %u\n",
556 /* Find out which buffer(s) are ready */
558 struct cio2_buffer *b;
560 b = q->bufs[q->bufs_first];
562 unsigned int received = entry[1].second_entry.num_of_bytes;
563 unsigned long payload =
564 vb2_get_plane_payload(&b->vbb.vb2_buf, 0);
566 q->bufs[q->bufs_first] = NULL;
567 atomic_dec(&q->bufs_queued);
568 dev_dbg(dev, "buffer %i done\n", b->vbb.vb2_buf.index);
570 b->vbb.vb2_buf.timestamp = ns;
571 b->vbb.field = V4L2_FIELD_NONE;
572 b->vbb.sequence = atomic_read(&q->frame_sequence);
573 if (payload != received)
575 "payload length is %lu, received %u\n",
577 vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
579 atomic_inc(&q->frame_sequence);
580 cio2_fbpt_entry_init_dummy(cio2, entry);
581 q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
582 entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
583 } while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
586 static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
589 * For the user space camera control algorithms it is essential
590 * to know when the reception of a frame has begun. That's often
591 * the best timing information to get from the hardware.
593 struct v4l2_event event = {
594 .type = V4L2_EVENT_FRAME_SYNC,
595 .u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
598 v4l2_event_queue(q->subdev.devnode, &event);
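/*
 * Consumer side, sketched for illustration only (subdev_fd is a
 * hypothetical descriptor for the opened CIO2 subdev node): user space
 * subscribes on the subdev and dequeues the events queued above.
 *
 *	struct v4l2_event_subscription sub = {
 *		.type = V4L2_EVENT_FRAME_SYNC,
 *	};
 *	struct v4l2_event ev;
 *
 *	ioctl(subdev_fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
 *	ioctl(subdev_fd, VIDIOC_DQEVENT, &ev);
 *	// ev.u.frame_sync.frame_sequence carries the sequence number
 */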
601 static const char *const cio2_irq_errs[] = {
602 "single packet header error corrected",
603 "multiple packet header errors detected",
604 "payload checksum (CRC) error",
606 "reserved short packet data type detected",
607 "reserved long packet data type detected",
608 "incomplete long packet detected",
611 "DPHY start of transmission error",
612 "DPHY synchronization error",
614 "escape mode trigger event",
615 "escape mode ultra-low power state for data lane(s)",
616 "escape mode ultra-low power state exit for clock lane",
617 "inter-frame short packet discarded",
618 "inter-frame long packet discarded",
619 "non-matching Long Packet stalled",
622 static void cio2_irq_log_irq_errs(struct device *dev, u8 port, u32 status)
624 unsigned long csi2_status = status;
627 for_each_set_bit(i, &csi2_status, ARRAY_SIZE(cio2_irq_errs))
628 dev_err(dev, "CSI-2 receiver port %i: %s\n",
629 port, cio2_irq_errs[i]);
631 if (fls_long(csi2_status) >= ARRAY_SIZE(cio2_irq_errs))
632 dev_warn(dev, "unknown CSI2 error 0x%lx on port %i\n",
636 static const char *const cio2_port_errs[] = {
638 "DPHY not recoverable",
639 "ECC not recoverable",
646 static void cio2_irq_log_port_errs(struct device *dev, u8 port, u32 status)
648 unsigned long port_status = status;
651 for_each_set_bit(i, &port_status, ARRAY_SIZE(cio2_port_errs))
652 dev_err(dev, "port %i error %s\n", port, cio2_port_errs[i]);
655 static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
657 struct device *dev = &cio2->pci_dev->dev;
658 void __iomem *const base = cio2->base;
660 if (int_status & CIO2_INT_IOOE) {
662 * Interrupt on Output Error:
663 * 1) SRAM is full and FS received, or
664 * 2) An invalid bit detected by DMA.
666 u32 oe_status, oe_clear;
668 oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
669 oe_status = oe_clear;
671 if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
672 dev_err(dev, "DMA output error: 0x%x\n",
673 (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
674 >> CIO2_INT_EXT_OE_DMAOE_SHIFT);
675 oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
677 if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
678 dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
679 (oe_status & CIO2_INT_EXT_OE_OES_MASK)
680 >> CIO2_INT_EXT_OE_OES_SHIFT);
681 oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
683 writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
685 dev_warn(dev, "unknown interrupt 0x%x on OE\n",
687 int_status &= ~CIO2_INT_IOOE;
690 if (int_status & CIO2_INT_IOC_MASK) {
691 /* DMA IO done -- frame ready */
695 for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
696 if (int_status & CIO2_INT_IOC(d)) {
697 clr |= CIO2_INT_IOC(d);
698 cio2_buffer_done(cio2, d);
703 if (int_status & CIO2_INT_IOS_IOLN_MASK) {
704 /* DMA IO starts or reached specified line */
708 for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
709 if (int_status & CIO2_INT_IOS_IOLN(d)) {
710 clr |= CIO2_INT_IOS_IOLN(d);
711 if (d == CIO2_DMA_CHAN)
712 cio2_queue_event_sof(cio2,
718 if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
719 /* CSI2 receiver (error) interrupt */
723 ie_status = readl(base + CIO2_REG_INT_STS_EXT_IE);
725 for (port = 0; port < CIO2_NUM_PORTS; port++) {
726 u32 port_status = (ie_status >> (port * 8)) & 0xff;
728 cio2_irq_log_port_errs(dev, port, port_status);
730 if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
731 void __iomem *csi_rx_base =
732 base + CIO2_REG_PIPE_BASE(port);
735 csi2_status = readl(csi_rx_base +
736 CIO2_REG_IRQCTRL_STATUS);
738 cio2_irq_log_irq_errs(dev, port, csi2_status);
741 csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
745 writel(ie_status, base + CIO2_REG_INT_STS_EXT_IE);
747 int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
751 dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
754 static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
756 struct cio2_device *cio2 = cio2_ptr;
757 void __iomem *const base = cio2->base;
758 struct device *dev = &cio2->pci_dev->dev;
761 int_status = readl(base + CIO2_REG_INT_STS);
762 dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
767 writel(int_status, base + CIO2_REG_INT_STS);
768 cio2_irq_handle_once(cio2, int_status);
769 int_status = readl(base + CIO2_REG_INT_STS);
771 dev_dbg(dev, "pending status 0x%x\n", int_status);
772 } while (int_status);
777 /**************** Videobuf2 interface ****************/
779 static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
780 enum vb2_buffer_state state)
784 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
786 atomic_dec(&q->bufs_queued);
787 vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
794 static int cio2_vb2_queue_setup(struct vb2_queue *vq,
795 unsigned int *num_buffers,
796 unsigned int *num_planes,
797 unsigned int sizes[],
798 struct device *alloc_devs[])
800 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
801 struct device *dev = &cio2->pci_dev->dev;
802 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
805 if (*num_planes && *num_planes < q->format.num_planes)
808 for (i = 0; i < q->format.num_planes; ++i) {
809 if (*num_planes && sizes[i] < q->format.plane_fmt[i].sizeimage)
811 sizes[i] = q->format.plane_fmt[i].sizeimage;
815 *num_planes = q->format.num_planes;
816 *num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
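/*
 * Reminder of the vb2 queue_setup contract (generic vb2 behaviour, not
 * specific to this driver): *num_planes == 0 means VIDIOC_REQBUFS and the
 * driver fills in the plane count and sizes itself; a non-zero
 * *num_planes comes from VIDIOC_CREATE_BUFS with caller-supplied sizes,
 * which are validated against the current format above.
 */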
818 /* Initialize buffer queue */
819 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
821 cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
823 atomic_set(&q->bufs_queued, 0);
830 /* Called after each buffer is allocated */
831 static int cio2_vb2_buf_init(struct vb2_buffer *vb)
833 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
834 struct device *dev = &cio2->pci_dev->dev;
835 struct cio2_buffer *b = to_cio2_buffer(vb);
836 unsigned int pages = PFN_UP(vb->planes[0].length);
837 unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
839 struct sg_dma_page_iter sg_iter;
842 if (lops <= 0 || lops > CIO2_MAX_LOPS) {
843 dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
844 vb->planes[0].length);
845 return -ENOSPC; /* Should never happen */
848 memset(b->lop, 0, sizeof(b->lop));
849 /* Allocate LOP table */
850 for (i = 0; i < lops; i++) {
851 b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
852 &b->lop_bus_addr[i], GFP_KERNEL);
858 sg = vb2_dma_sg_plane_desc(vb, 0);
862 if (sg->nents && sg->sgl)
863 b->offset = sg->sgl->offset;
866 for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
869 b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
871 if (j == CIO2_LOP_ENTRIES) {
877 b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
881 dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
885 /* Transfer buffer ownership to cio2 */
886 static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
888 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
889 struct device *dev = &cio2->pci_dev->dev;
890 struct cio2_queue *q =
891 container_of(vb->vb2_queue, struct cio2_queue, vbq);
892 struct cio2_buffer *b = to_cio2_buffer(vb);
893 struct cio2_fbpt_entry *entry;
895 unsigned int i, j, next = q->bufs_next;
896 int bufs_queued = atomic_inc_return(&q->bufs_queued);
899 dev_dbg(dev, "queue buffer %d\n", vb->index);
902 * This code queues the buffer to the CIO2 DMA engine, which starts
903 * running once streaming has started. It is possible that this code
904 * gets pre-empted due to increased CPU load. Upon this, the driver
905 * does not get an opportunity to queue new buffers to the CIO2 DMA
906 * engine. When the DMA engine encounters an FBPT entry without the
907 * VALID bit set, the DMA engine halts, which requires a restart of
908 * the DMA engine and sensor, to continue streaming.
909 * This is not desired and is highly unlikely given that there are
910 * 32 FBPT entries that the DMA engine needs to process before it can run
911 * into an FBPT entry without the VALID bit set. We try to mitigate this
912 * race by disabling interrupts for the duration of this queueing.
914 local_irq_save(flags);
916 fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
917 >> CIO2_CDMARI_FBPT_RP_SHIFT)
918 & CIO2_CDMARI_FBPT_RP_MASK;
921 * fbpt_rp is the fbpt entry that the dma is currently working
922 * on, but since it could jump to the next entry at any time,
923 * assume that we might already be there.
925 fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
927 if (bufs_queued <= 1 || fbpt_rp == next)
928 /* Buffers were drained */
929 next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
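/*
 * Ring index example (hypothetical state, CIO2_MAX_BUFFERS == 32 as noted
 * in the comment above): if the hardware read pointer reports entry 30,
 * fbpt_rp is advanced to 31, and a drained queue resumes filling at
 * (31 + 1) % 32 = 0, always at least one entry ahead of where the DMA
 * engine may currently be.
 */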
931 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
933 * We have allocated CIO2_MAX_BUFFERS circularly for the
934 * hw, while the user has requested a queue of N buffers. The driver
935 * ensures N <= CIO2_MAX_BUFFERS and guarantees that whenever the
936 * user queues a buffer, a free FBPT entry is necessarily available.
938 if (!q->bufs[next]) {
940 entry = &q->fbpt[next * CIO2_MAX_LOPS];
941 cio2_fbpt_entry_init_buf(cio2, b, entry);
942 local_irq_restore(flags);
943 q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
944 for (j = 0; j < vb->num_planes; j++)
945 vb2_set_plane_payload(vb, j,
946 q->format.plane_fmt[j].sizeimage);
950 dev_dbg(dev, "entry %i was full!\n", next);
951 next = (next + 1) % CIO2_MAX_BUFFERS;
954 local_irq_restore(flags);
955 dev_err(dev, "error: all cio2 entries were full!\n");
956 atomic_dec(&q->bufs_queued);
957 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
960 /* Called when each buffer is freed */
961 static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
963 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
964 struct device *dev = &cio2->pci_dev->dev;
965 struct cio2_buffer *b = to_cio2_buffer(vb);
969 for (i = 0; i < CIO2_MAX_LOPS; i++) {
971 dma_free_coherent(dev, PAGE_SIZE,
972 b->lop[i], b->lop_bus_addr[i]);
976 static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
978 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
979 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
980 struct device *dev = &cio2->pci_dev->dev;
984 atomic_set(&q->frame_sequence, 0);
986 r = pm_runtime_resume_and_get(dev);
988 dev_info(dev, "failed to set power %d\n", r);
992 r = video_device_pipeline_start(&q->vdev, &q->pipe);
996 r = cio2_hw_init(cio2, q);
1000 /* Start streaming on sensor */
1001 r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
1003 goto fail_csi2_subdev;
1005 cio2->streaming = true;
1010 cio2_hw_exit(cio2, q);
1012 video_device_pipeline_stop(&q->vdev);
1014 dev_dbg(dev, "failed to start streaming (%d)\n", r);
1015 cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
1016 pm_runtime_put(dev);
1021 static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
1023 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
1024 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
1025 struct device *dev = &cio2->pci_dev->dev;
1027 if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
1028 dev_err(dev, "failed to stop sensor streaming\n");
1030 cio2_hw_exit(cio2, q);
1031 synchronize_irq(cio2->pci_dev->irq);
1032 cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
1033 video_device_pipeline_stop(&q->vdev);
1034 pm_runtime_put(dev);
1035 cio2->streaming = false;
1038 static const struct vb2_ops cio2_vb2_ops = {
1039 .buf_init = cio2_vb2_buf_init,
1040 .buf_queue = cio2_vb2_buf_queue,
1041 .buf_cleanup = cio2_vb2_buf_cleanup,
1042 .queue_setup = cio2_vb2_queue_setup,
1043 .start_streaming = cio2_vb2_start_streaming,
1044 .stop_streaming = cio2_vb2_stop_streaming,
1045 .wait_prepare = vb2_ops_wait_prepare,
1046 .wait_finish = vb2_ops_wait_finish,
1049 /**************** V4L2 interface ****************/
1051 static int cio2_v4l2_querycap(struct file *file, void *fh,
1052 struct v4l2_capability *cap)
1054 strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
1055 strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
1060 static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
1061 struct v4l2_fmtdesc *f)
1063 if (f->index >= ARRAY_SIZE(formats))
1066 f->pixelformat = formats[f->index].fourcc;
1071 /* The format is validated in cio2_video_link_validate() */
1072 static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
1074 struct cio2_queue *q = file_to_cio2_queue(file);
1076 f->fmt.pix_mp = q->format;
1081 static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
1083 const struct ipu3_cio2_fmt *fmt;
1084 struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;
1086 fmt = cio2_find_format(&mpix->pixelformat, NULL);
1090 /* Only supports up to 4224x3136 */
1091 if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
1092 mpix->width = CIO2_IMAGE_MAX_WIDTH;
1093 if (mpix->height > CIO2_IMAGE_MAX_HEIGHT)
1094 mpix->height = CIO2_IMAGE_MAX_HEIGHT;
1096 mpix->num_planes = 1;
1097 mpix->pixelformat = fmt->fourcc;
1098 mpix->colorspace = V4L2_COLORSPACE_RAW;
1099 mpix->field = V4L2_FIELD_NONE;
1100 mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
1101 mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
1105 mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
1106 mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
1107 mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
1112 static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
1114 struct cio2_queue *q = file_to_cio2_queue(file);
1116 cio2_v4l2_try_fmt(file, fh, f);
1117 q->format = f->fmt.pix_mp;
1123 cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1125 if (input->index > 0)
1128 strscpy(input->name, "camera", sizeof(input->name));
1129 input->type = V4L2_INPUT_TYPE_CAMERA;
1135 cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
1143 cio2_video_s_input(struct file *file, void *fh, unsigned int input)
1145 return input == 0 ? 0 : -EINVAL;
1148 static const struct v4l2_file_operations cio2_v4l2_fops = {
1149 .owner = THIS_MODULE,
1150 .unlocked_ioctl = video_ioctl2,
1151 .open = v4l2_fh_open,
1152 .release = vb2_fop_release,
1153 .poll = vb2_fop_poll,
1154 .mmap = vb2_fop_mmap,
1157 static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
1158 .vidioc_querycap = cio2_v4l2_querycap,
1159 .vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
1160 .vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
1161 .vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
1162 .vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
1163 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1164 .vidioc_create_bufs = vb2_ioctl_create_bufs,
1165 .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1166 .vidioc_querybuf = vb2_ioctl_querybuf,
1167 .vidioc_qbuf = vb2_ioctl_qbuf,
1168 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1169 .vidioc_streamon = vb2_ioctl_streamon,
1170 .vidioc_streamoff = vb2_ioctl_streamoff,
1171 .vidioc_expbuf = vb2_ioctl_expbuf,
1172 .vidioc_enum_input = cio2_video_enum_input,
1173 .vidioc_g_input = cio2_video_g_input,
1174 .vidioc_s_input = cio2_video_s_input,
1177 static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
1179 struct v4l2_event_subscription *sub)
1181 if (sub->type != V4L2_EVENT_FRAME_SYNC)
1184 /* Line number. For now only zero accepted. */
1188 return v4l2_event_subscribe(fh, sub, 0, NULL);
1191 static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1193 struct v4l2_mbus_framefmt *format;
1194 const struct v4l2_mbus_framefmt fmt_default = {
1197 .code = formats[0].mbus_code,
1198 .field = V4L2_FIELD_NONE,
1199 .colorspace = V4L2_COLORSPACE_RAW,
1200 .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
1201 .quantization = V4L2_QUANTIZATION_DEFAULT,
1202 .xfer_func = V4L2_XFER_FUNC_DEFAULT,
1205 /* Initialize try_fmt */
1206 format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SINK);
1207 *format = fmt_default;
1210 format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SOURCE);
1211 *format = fmt_default;
1217 * cio2_subdev_get_fmt - Handle get format by pads subdev method
1218 * @sd: pointer to v4l2 subdev structure
1219 * @sd_state: V4L2 subdev state
1220 * @fmt: pointer to v4l2 subdev format structure
1221 * Return: -EINVAL or zero on success
1223 static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
1224 struct v4l2_subdev_state *sd_state,
1225 struct v4l2_subdev_format *fmt)
1227 struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1229 mutex_lock(&q->subdev_lock);
1231 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1232 fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
1235 fmt->format = q->subdev_fmt;
1237 mutex_unlock(&q->subdev_lock);
1243 * cio2_subdev_set_fmt - Handle set format by pads subdev method
1244 * @sd: pointer to v4l2 subdev structure
1245 * @sd_state: V4L2 subdev state
1246 * @fmt: pointer to v4l2 subdev format structure
1247 * Return: -EINVAL or zero on success
1249 static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
1250 struct v4l2_subdev_state *sd_state,
1251 struct v4l2_subdev_format *fmt)
1253 struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1254 struct v4l2_mbus_framefmt *mbus;
1255 u32 mbus_code = fmt->format.code;
1259 * Only allow setting sink pad format;
1260 * source always propagates from sink
1262 if (fmt->pad == CIO2_PAD_SOURCE)
1263 return cio2_subdev_get_fmt(sd, sd_state, fmt);
1265 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1266 mbus = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
1268 mbus = &q->subdev_fmt;
1270 fmt->format.code = formats[0].mbus_code;
1272 for (i = 0; i < ARRAY_SIZE(formats); i++) {
1273 if (formats[i].mbus_code == mbus_code) {
1274 fmt->format.code = mbus_code;
1279 fmt->format.width = min(fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
1280 fmt->format.height = min(fmt->format.height, CIO2_IMAGE_MAX_HEIGHT);
1281 fmt->format.field = V4L2_FIELD_NONE;
1283 mutex_lock(&q->subdev_lock);
1284 *mbus = fmt->format;
1285 mutex_unlock(&q->subdev_lock);
1290 static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
1291 struct v4l2_subdev_state *sd_state,
1292 struct v4l2_subdev_mbus_code_enum *code)
1294 if (code->index >= ARRAY_SIZE(formats))
1297 code->code = formats[code->index].mbus_code;
1301 static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
1302 struct v4l2_subdev_format *fmt)
1304 if (is_media_entity_v4l2_subdev(pad->entity)) {
1305 struct v4l2_subdev *sd =
1306 media_entity_to_v4l2_subdev(pad->entity);
1308 memset(fmt, 0, sizeof(*fmt));
1309 fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
1310 fmt->pad = pad->index;
1311 return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
1317 static int cio2_video_link_validate(struct media_link *link)
1319 struct media_entity *entity = link->sink->entity;
1320 struct video_device *vd = media_entity_to_video_device(entity);
1321 struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
1322 struct cio2_device *cio2 = video_get_drvdata(vd);
1323 struct device *dev = &cio2->pci_dev->dev;
1324 struct v4l2_subdev_format source_fmt;
1327 if (!media_pad_remote_pad_first(entity->pads)) {
1328 dev_info(dev, "video node %s pad not connected\n", vd->name);
1332 ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
1336 if (source_fmt.format.width != q->format.width ||
1337 source_fmt.format.height != q->format.height) {
1338 dev_err(dev, "Wrong width or height %ux%u (%ux%u expected)\n",
1339 q->format.width, q->format.height,
1340 source_fmt.format.width, source_fmt.format.height);
1344 if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
1350 static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
1351 .subscribe_event = cio2_subdev_subscribe_event,
1352 .unsubscribe_event = v4l2_event_subdev_unsubscribe,
1355 static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
1356 .open = cio2_subdev_open,
1359 static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
1360 .link_validate = v4l2_subdev_link_validate_default,
1361 .get_fmt = cio2_subdev_get_fmt,
1362 .set_fmt = cio2_subdev_set_fmt,
1363 .enum_mbus_code = cio2_subdev_enum_mbus_code,
1366 static const struct v4l2_subdev_ops cio2_subdev_ops = {
1367 .core = &cio2_subdev_core_ops,
1368 .pad = &cio2_subdev_pad_ops,
1371 /******* V4L2 sub-device asynchronous registration callbacks ***********/
1373 struct sensor_async_subdev {
1374 struct v4l2_async_subdev asd;
1375 struct csi2_bus_info csi2;
1378 #define to_sensor_asd(asd) container_of(asd, struct sensor_async_subdev, asd)
1380 /* The .bound() notifier callback when a match is found */
1381 static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
1382 struct v4l2_subdev *sd,
1383 struct v4l2_async_subdev *asd)
1385 struct cio2_device *cio2 = to_cio2_device(notifier);
1386 struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
1387 struct cio2_queue *q;
1389 if (cio2->queue[s_asd->csi2.port].sensor)
1392 q = &cio2->queue[s_asd->csi2.port];
1394 q->csi2 = s_asd->csi2;
1396 q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
1401 /* The .unbind callback */
1402 static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
1403 struct v4l2_subdev *sd,
1404 struct v4l2_async_subdev *asd)
1406 struct cio2_device *cio2 = to_cio2_device(notifier);
1407 struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
1409 cio2->queue[s_asd->csi2.port].sensor = NULL;
1412 /* .complete() is called after all subdevices have been located */
1413 static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
1415 struct cio2_device *cio2 = to_cio2_device(notifier);
1416 struct device *dev = &cio2->pci_dev->dev;
1417 struct sensor_async_subdev *s_asd;
1418 struct v4l2_async_subdev *asd;
1419 struct cio2_queue *q;
1423 list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) {
1424 s_asd = to_sensor_asd(asd);
1425 q = &cio2->queue[s_asd->csi2.port];
1427 for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
1428 if (q->sensor->entity.pads[pad].flags &
1429 MEDIA_PAD_FL_SOURCE)
1432 if (pad == q->sensor->entity.num_pads) {
1433 dev_err(dev, "failed to find src pad for %s\n",
1438 ret = media_create_pad_link(
1439 &q->sensor->entity, pad,
1440 &q->subdev.entity, CIO2_PAD_SINK,
1443 dev_err(dev, "failed to create link for %s\n",
1449 return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
1452 static const struct v4l2_async_notifier_operations cio2_async_ops = {
1453 .bound = cio2_notifier_bound,
1454 .unbind = cio2_notifier_unbind,
1455 .complete = cio2_notifier_complete,
1458 static int cio2_parse_firmware(struct cio2_device *cio2)
1460 struct device *dev = &cio2->pci_dev->dev;
1464 for (i = 0; i < CIO2_NUM_PORTS; i++) {
1465 struct v4l2_fwnode_endpoint vep = {
1466 .bus_type = V4L2_MBUS_CSI2_DPHY
1468 struct sensor_async_subdev *s_asd;
1469 struct fwnode_handle *ep;
1471 ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
1472 FWNODE_GRAPH_ENDPOINT_NEXT);
1476 ret = v4l2_fwnode_endpoint_parse(ep, &vep);
1480 s_asd = v4l2_async_nf_add_fwnode_remote(&cio2->notifier, ep,
1482 sensor_async_subdev);
1483 if (IS_ERR(s_asd)) {
1484 ret = PTR_ERR(s_asd);
1488 s_asd->csi2.port = vep.base.port;
1489 s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
1491 fwnode_handle_put(ep);
1496 fwnode_handle_put(ep);
1501 * Proceed even without sensors connected to allow the device to suspend.
1504 cio2->notifier.ops = &cio2_async_ops;
1505 ret = v4l2_async_nf_register(&cio2->v4l2_dev, &cio2->notifier);
1507 dev_err(dev, "failed to register async notifier: %d\n", ret);
1512 /**************** Queue initialization ****************/
1513 static const struct media_entity_operations cio2_media_ops = {
1514 .link_validate = v4l2_subdev_link_validate,
1517 static const struct media_entity_operations cio2_video_entity_ops = {
1518 .link_validate = cio2_video_link_validate,
1521 static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
1523 static const u32 default_width = 1936;
1524 static const u32 default_height = 1096;
1525 const struct ipu3_cio2_fmt dflt_fmt = formats[0];
1526 struct device *dev = &cio2->pci_dev->dev;
1527 struct video_device *vdev = &q->vdev;
1528 struct vb2_queue *vbq = &q->vbq;
1529 struct v4l2_subdev *subdev = &q->subdev;
1530 struct v4l2_mbus_framefmt *fmt;
1533 /* Initialize miscellaneous variables */
1534 mutex_init(&q->lock);
1535 mutex_init(&q->subdev_lock);
1537 /* Initialize formats to default values */
1538 fmt = &q->subdev_fmt;
1539 fmt->width = default_width;
1540 fmt->height = default_height;
1541 fmt->code = dflt_fmt.mbus_code;
1542 fmt->field = V4L2_FIELD_NONE;
1544 q->format.width = default_width;
1545 q->format.height = default_height;
1546 q->format.pixelformat = dflt_fmt.fourcc;
1547 q->format.colorspace = V4L2_COLORSPACE_RAW;
1548 q->format.field = V4L2_FIELD_NONE;
1549 q->format.num_planes = 1;
1550 q->format.plane_fmt[0].bytesperline =
1551 cio2_bytesperline(q->format.width);
1552 q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
1555 /* Initialize fbpt */
1556 r = cio2_fbpt_init(cio2, q);
1560 /* Initialize media entities */
1561 q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
1562 MEDIA_PAD_FL_MUST_CONNECT;
1563 q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
1564 subdev->entity.ops = &cio2_media_ops;
1565 subdev->internal_ops = &cio2_subdev_internal_ops;
1566 r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
1568 dev_err(dev, "failed to initialize subdev media entity (%d)\n", r);
1569 goto fail_subdev_media_entity;
1572 q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
1573 vdev->entity.ops = &cio2_video_entity_ops;
1574 r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
1576 dev_err(dev, "failed to initialize videodev media entity (%d)\n",
1578 goto fail_vdev_media_entity;
1581 /* Initialize subdev */
1582 v4l2_subdev_init(subdev, &cio2_subdev_ops);
1583 subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
1584 subdev->owner = THIS_MODULE;
1585 snprintf(subdev->name, sizeof(subdev->name),
1586 CIO2_ENTITY_NAME " %td", q - cio2->queue);
1587 subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
1588 v4l2_set_subdevdata(subdev, cio2);
1589 r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
1591 dev_err(dev, "failed to initialize subdev (%d)\n", r);
1595 /* Initialize vbq */
1596 vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1597 vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
1598 vbq->ops = &cio2_vb2_ops;
1599 vbq->mem_ops = &vb2_dma_sg_memops;
1600 vbq->buf_struct_size = sizeof(struct cio2_buffer);
1601 vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1602 vbq->min_buffers_needed = 1;
1603 vbq->drv_priv = cio2;
1604 vbq->lock = &q->lock;
1605 r = vb2_queue_init(vbq);
1607 dev_err(dev, "failed to initialize videobuf2 queue (%d)\n", r);
1611 /* Initialize vdev */
1612 snprintf(vdev->name, sizeof(vdev->name),
1613 "%s %td", CIO2_NAME, q - cio2->queue);
1614 vdev->release = video_device_release_empty;
1615 vdev->fops = &cio2_v4l2_fops;
1616 vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
1617 vdev->lock = &cio2->lock;
1618 vdev->v4l2_dev = &cio2->v4l2_dev;
1619 vdev->queue = &q->vbq;
1620 vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
1621 video_set_drvdata(vdev, cio2);
1622 r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
1624 dev_err(dev, "failed to register video device (%d)\n", r);
1628 /* Create link from CIO2 subdev to output node */
1629 r = media_create_pad_link(
1630 &subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
1631 MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
1638 vb2_video_unregister_device(&q->vdev);
1640 v4l2_device_unregister_subdev(subdev);
1642 media_entity_cleanup(&vdev->entity);
1643 fail_vdev_media_entity:
1644 media_entity_cleanup(&subdev->entity);
1645 fail_subdev_media_entity:
1646 cio2_fbpt_exit(q, dev);
1648 mutex_destroy(&q->subdev_lock);
1649 mutex_destroy(&q->lock);
1654 static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
1656 vb2_video_unregister_device(&q->vdev);
1657 media_entity_cleanup(&q->vdev.entity);
1658 v4l2_device_unregister_subdev(&q->subdev);
1659 media_entity_cleanup(&q->subdev.entity);
1660 cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1661 mutex_destroy(&q->subdev_lock);
1662 mutex_destroy(&q->lock);
1665 static int cio2_queues_init(struct cio2_device *cio2)
1669 for (i = 0; i < CIO2_QUEUES; i++) {
1670 r = cio2_queue_init(cio2, &cio2->queue[i]);
1675 if (i == CIO2_QUEUES)
1678 for (i--; i >= 0; i--)
1679 cio2_queue_exit(cio2, &cio2->queue[i]);
1684 static void cio2_queues_exit(struct cio2_device *cio2)
1688 for (i = 0; i < CIO2_QUEUES; i++)
1689 cio2_queue_exit(cio2, &cio2->queue[i]);
1692 static int cio2_check_fwnode_graph(struct fwnode_handle *fwnode)
1694 struct fwnode_handle *endpoint;
1696 if (IS_ERR_OR_NULL(fwnode))
1699 endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
1701 fwnode_handle_put(endpoint);
1705 return cio2_check_fwnode_graph(fwnode->secondary);
1708 /**************** PCI interface ****************/
1710 static int cio2_pci_probe(struct pci_dev *pci_dev,
1711 const struct pci_device_id *id)
1713 struct device *dev = &pci_dev->dev;
1714 struct fwnode_handle *fwnode = dev_fwnode(dev);
1715 struct cio2_device *cio2;
1719 * On some platforms no connections to sensors are defined in firmware;
1720 * if the device has no endpoints then we can try to build those as
1721 * software_nodes parsed from SSDB.
1723 r = cio2_check_fwnode_graph(fwnode);
1725 if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) {
1726 dev_err(dev, "fwnode graph has no endpoints connected\n");
1730 r = cio2_bridge_init(pci_dev);
1735 cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
1738 cio2->pci_dev = pci_dev;
1740 r = pcim_enable_device(pci_dev);
1742 dev_err(dev, "failed to enable device (%d)\n", r);
1746 dev_info(dev, "device 0x%x (rev: 0x%x)\n",
1747 pci_dev->device, pci_dev->revision);
1749 r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
1751 dev_err(dev, "failed to remap I/O memory (%d)\n", r);
1755 cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
1757 pci_set_drvdata(pci_dev, cio2);
1759 pci_set_master(pci_dev);
1761 r = dma_set_mask(&pci_dev->dev, CIO2_DMA_MASK);
1763 dev_err(dev, "failed to set DMA mask (%d)\n", r);
1767 r = pci_enable_msi(pci_dev);
1769 dev_err(dev, "failed to enable MSI (%d)\n", r);
1773 r = cio2_fbpt_init_dummy(cio2);
1777 mutex_init(&cio2->lock);
1779 cio2->media_dev.dev = dev;
1780 strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
1781 sizeof(cio2->media_dev.model));
1782 cio2->media_dev.hw_revision = 0;
1784 media_device_init(&cio2->media_dev);
1785 r = media_device_register(&cio2->media_dev);
1787 goto fail_mutex_destroy;
1789 cio2->v4l2_dev.mdev = &cio2->media_dev;
1790 r = v4l2_device_register(dev, &cio2->v4l2_dev);
1792 dev_err(dev, "failed to register V4L2 device (%d)\n", r);
1793 goto fail_media_device_unregister;
1796 r = cio2_queues_init(cio2);
1798 goto fail_v4l2_device_unregister;
1800 v4l2_async_nf_init(&cio2->notifier);
1802 /* Register notifier for subdevices we care */
1803 r = cio2_parse_firmware(cio2);
1805 goto fail_clean_notifier;
1807 r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
1810 dev_err(dev, "failed to request IRQ (%d)\n", r);
1811 goto fail_clean_notifier;
1814 pm_runtime_put_noidle(dev);
1815 pm_runtime_allow(dev);
1819 fail_clean_notifier:
1820 v4l2_async_nf_unregister(&cio2->notifier);
1821 v4l2_async_nf_cleanup(&cio2->notifier);
1822 cio2_queues_exit(cio2);
1823 fail_v4l2_device_unregister:
1824 v4l2_device_unregister(&cio2->v4l2_dev);
1825 fail_media_device_unregister:
1826 media_device_unregister(&cio2->media_dev);
1827 media_device_cleanup(&cio2->media_dev);
1829 mutex_destroy(&cio2->lock);
1830 cio2_fbpt_exit_dummy(cio2);
1835 static void cio2_pci_remove(struct pci_dev *pci_dev)
1837 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1839 media_device_unregister(&cio2->media_dev);
1840 v4l2_async_nf_unregister(&cio2->notifier);
1841 v4l2_async_nf_cleanup(&cio2->notifier);
1842 cio2_queues_exit(cio2);
1843 cio2_fbpt_exit_dummy(cio2);
1844 v4l2_device_unregister(&cio2->v4l2_dev);
1845 media_device_cleanup(&cio2->media_dev);
1846 mutex_destroy(&cio2->lock);
1848 pm_runtime_forbid(&pci_dev->dev);
1849 pm_runtime_get_noresume(&pci_dev->dev);
1852 static int __maybe_unused cio2_runtime_suspend(struct device *dev)
1854 struct pci_dev *pci_dev = to_pci_dev(dev);
1855 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1856 void __iomem *const base = cio2->base;
1859 writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
1860 dev_dbg(dev, "cio2 runtime suspend.\n");
1862 pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1863 pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1864 pm |= CIO2_PMCSR_D3;
1865 pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
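/*
 * Bit manipulation, illustrated with hypothetical values (assuming the
 * power state field occupies the bits below CIO2_PMCSR_D0D3_SHIFT):
 * shifting right and then left by the same amount zeroes that field while
 * preserving the upper bits, and OR-ing in CIO2_PMCSR_D3 then requests
 * D3, e.g. with a shift of 2 and D3 encoded as 0x3: 0x6001 -> 0x6000 -> 0x6003.
 */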
1870 static int __maybe_unused cio2_runtime_resume(struct device *dev)
1872 struct pci_dev *pci_dev = to_pci_dev(dev);
1873 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1874 void __iomem *const base = cio2->base;
1877 writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
1878 dev_dbg(dev, "cio2 runtime resume.\n");
1880 pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1881 pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1882 pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1888 * Helper function to advance all the elements of a circular buffer by "start" positions.
1891 static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
1897 { start, elems - 1 },
1900 #define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)
1902 /* Loop as long as we have out-of-place entries */
1903 while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
1907 * Find the number of entries that can be arranged on this
1910 size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));
1912 /* Swap the entries in two parts of the array. */
1913 for (i = 0; i < size0; i++) {
1914 u8 *d = ptr + elem_size * (arr[1].begin + i);
1915 u8 *s = ptr + elem_size * (arr[0].begin + i);
1918 for (j = 0; j < elem_size; j++)
1922 if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
1923 /* The end of the first array remains unarranged. */
1924 arr[0].begin += size0;
1927 * The first array is fully arranged so we proceed
1928 * handling the next one.
1930 arr[0].begin = arr[1].begin;
1931 arr[0].end = arr[1].begin + size0 - 1;
1932 arr[1].begin += size0;
1937 static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
1941 for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
1942 i++, j = (j + 1) % CIO2_MAX_BUFFERS)
1946 if (i == CIO2_MAX_BUFFERS)
1950 arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
1951 CIO2_MAX_BUFFERS, j);
1952 arrange(q->bufs, sizeof(struct cio2_buffer *),
1953 CIO2_MAX_BUFFERS, j);
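/*
 * Illustrative effect (hypothetical state, CIO2_MAX_BUFFERS == 32): if the
 * first queued buffer sits at index j == 5, both arrays are rotated left
 * by 5, so q->bufs[5] and its FBPT entry land at index 0, q->bufs[6] at
 * index 1, and so on, wrapping around.  After resume the DMA engine starts
 * at FBPT entry 0 and therefore meets the queued buffers in order.
 */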
1957 * DMA clears the valid bit when accessing the buffer.
1958 * When stopping the stream in the suspend callback, some of the buffers
1959 * may be in an invalid state. After resume, when the DMA meets an invalid
1960 * buffer, it will halt and stop receiving new data.
1961 * To avoid DMA halting, set the valid bit for all buffers in FBPT.
1963 for (i = 0; i < CIO2_MAX_BUFFERS; i++)
1964 cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
1967 static int __maybe_unused cio2_suspend(struct device *dev)
1969 struct pci_dev *pci_dev = to_pci_dev(dev);
1970 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1971 struct cio2_queue *q = cio2->cur_queue;
1974 dev_dbg(dev, "cio2 suspend\n");
1975 if (!cio2->streaming)
1979 r = v4l2_subdev_call(q->sensor, video, s_stream, 0);
1981 dev_err(dev, "failed to stop sensor streaming\n");
1985 cio2_hw_exit(cio2, q);
1986 synchronize_irq(pci_dev->irq);
1988 pm_runtime_force_suspend(dev);
1991 * Upon resume, the hw starts to process the fbpt entries from the beginning,
1992 * so relocate the queued buffers to the fbpt head before suspending.
1994 cio2_fbpt_rearrange(cio2, q);
2001 static int __maybe_unused cio2_resume(struct device *dev)
2003 struct cio2_device *cio2 = dev_get_drvdata(dev);
2004 struct cio2_queue *q = cio2->cur_queue;
2007 dev_dbg(dev, "cio2 resume\n");
2008 if (!cio2->streaming)
2011 r = pm_runtime_force_resume(dev);
2013 dev_err(dev, "failed to set power %d\n", r);
2017 r = cio2_hw_init(cio2, q);
2019 dev_err(dev, "failed to init cio2 hw\n");
2023 r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
2025 dev_err(dev, "failed to start sensor streaming\n");
2026 cio2_hw_exit(cio2, q);
2032 static const struct dev_pm_ops cio2_pm_ops = {
2033 SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
2034 SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
2037 static const struct pci_device_id cio2_pci_id_table[] = {
2038 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
2042 MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
2044 static struct pci_driver cio2_pci_driver = {
2046 .id_table = cio2_pci_id_table,
2047 .probe = cio2_pci_probe,
2048 .remove = cio2_pci_remove,
2054 module_pci_driver(cio2_pci_driver);
2058 MODULE_AUTHOR("Jian Xu Zheng");
2061 MODULE_LICENSE("GPL v2");
2062 MODULE_DESCRIPTION("IPU3 CIO2 driver");