1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2017,2020 Intel Corporation
5 * Based partially on Intel IPU4 driver written by
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
18 #include <linux/module.h>
19 #include <linux/pci.h>
20 #include <linux/pfn.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/property.h>
23 #include <linux/vmalloc.h>
24 #include <media/v4l2-ctrls.h>
25 #include <media/v4l2-device.h>
26 #include <media/v4l2-event.h>
27 #include <media/v4l2-fwnode.h>
28 #include <media/v4l2-ioctl.h>
29 #include <media/videobuf2-dma-sg.h>
31 #include "ipu3-cio2.h"
33 struct ipu3_cio2_fmt {
41 * These are the raw formats used by Intel's third generation of
42 * Image Processing Unit, known as IPU3:
43 * 10-bit raw Bayer packed, 32 bytes for every 25 pixels, with the
44 * last 6 bits unused.
46 static const struct ipu3_cio2_fmt formats[] = {
47 { /* put default entry at beginning */
48 .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
49 .fourcc = V4L2_PIX_FMT_IPU3_SGRBG10,
53 .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
54 .fourcc = V4L2_PIX_FMT_IPU3_SGBRG10,
58 .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
59 .fourcc = V4L2_PIX_FMT_IPU3_SBGGR10,
63 .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
64 .fourcc = V4L2_PIX_FMT_IPU3_SRGGB10,
71 * cio2_find_format - look up color format by fourcc and/or media bus code
72 * @pixelformat: fourcc to match, ignored if null
73 * @mbus_code: media bus code to match, ignored if null
75 static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
80 for (i = 0; i < ARRAY_SIZE(formats); i++) {
81 if (pixelformat && *pixelformat != formats[i].fourcc)
83 if (mbus_code && *mbus_code != formats[i].mbus_code)
92 static inline u32 cio2_bytesperline(const unsigned int width)
95 * 64 bytes for every 50 pixels; the line length
96 * in bytes is a multiple of 64 (line end alignment).
98 return DIV_ROUND_UP(width, 50) * 64;
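/*
 * Worked example (editor's note, not part of the driver): for the
 * 1936-pixel wide default format used below,
 *     cio2_bytesperline(1936) = DIV_ROUND_UP(1936, 50) * 64
 *                             = 39 * 64 = 2496 bytes per line,
 * i.e. 25 pixels are packed into each 32-byte block and every line is
 * padded up to a 64-byte boundary.
 */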
101 /**************** FBPT operations ****************/
103 static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
105 if (cio2->dummy_lop) {
106 dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
107 cio2->dummy_lop, cio2->dummy_lop_bus_addr);
108 cio2->dummy_lop = NULL;
110 if (cio2->dummy_page) {
111 dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
112 cio2->dummy_page, cio2->dummy_page_bus_addr);
113 cio2->dummy_page = NULL;
117 static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
121 cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
122 &cio2->dummy_page_bus_addr,
124 cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
125 &cio2->dummy_lop_bus_addr,
127 if (!cio2->dummy_page || !cio2->dummy_lop) {
128 cio2_fbpt_exit_dummy(cio2);
132 * A List of Pointers (LOP) contains 1024 32-bit pointers, each referring to a 4 KB page.
133 * Initialize each entry to the dummy_page bus base address.
135 for (i = 0; i < CIO2_LOP_ENTRIES; i++)
136 cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
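/*
 * Capacity sketch (editor's note, assuming 4 KiB pages): with
 * CIO2_LOP_ENTRIES == 1024, a single LOP page maps 1024 * 4 KiB = 4 MiB
 * of buffer memory, so a frame buffer of "length" bytes needs
 * DIV_ROUND_UP(PFN_UP(length) + 1, CIO2_LOP_ENTRIES) LOP pages, as
 * cio2_vb2_buf_init() below computes; the "+ 1" leaves room for the
 * trailing dummy-page entry.
 */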
141 static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
142 struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
145 * The CPU first initializes some fields in the FBPT and then sets
146 * the VALID bit. This barrier ensures that the DMA (device)
147 * does not see the VALID bit enabled before the other fields are
148 * initialized; otherwise it could lead to havoc.
153 * Request interrupts for start and completion.
154 * The VALID bit is applicable only to the 1st entry.
156 entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
157 CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
160 /* Initialize FBPT entries to point to the dummy frame */
161 static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
162 struct cio2_fbpt_entry
163 entry[CIO2_MAX_LOPS])
167 entry[0].first_entry.first_page_offset = 0;
168 entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
169 entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
171 for (i = 0; i < CIO2_MAX_LOPS; i++)
172 entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
174 cio2_fbpt_entry_enable(cio2, entry);
177 /* Initialize FBPT entries to point to a given buffer */
178 static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
179 struct cio2_buffer *b,
180 struct cio2_fbpt_entry
181 entry[CIO2_MAX_LOPS])
183 struct vb2_buffer *vb = &b->vbb.vb2_buf;
184 unsigned int length = vb->planes[0].length;
187 entry[0].first_entry.first_page_offset = b->offset;
188 remaining = length + entry[0].first_entry.first_page_offset;
189 entry[1].second_entry.num_of_pages = PFN_UP(remaining);
191 * last_page_available_bytes has the offset of the last byte in the
192 * last page which is still accessible by DMA. DMA cannot access
193 * beyond this point. Valid range for this is from 0 to 4095.
194 * 0 indicates 1st byte in the page is DMA accessible.
195 * 4095 (PAGE_SIZE - 1) means every single byte in the last page
196 * is available for DMA transfer.
198 remaining = offset_in_page(remaining) ?: PAGE_SIZE;
199 entry[1].second_entry.last_page_available_bytes = remaining - 1;
203 while (remaining > 0) {
204 entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
205 remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
211 * The first unused FBPT entry should still point to a valid LOP
213 entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
215 cio2_fbpt_entry_enable(cio2, entry);
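/*
 * Worked example (editor's note, assuming 4 KiB pages): for the default
 * 1936x1096 format the plane length is 2496 * 1096 = 2735616 bytes. With
 * a first_page_offset of 0 this gives
 *     num_of_pages              = PFN_UP(2735616)     = 668
 *     last_page_available_bytes = 2735616 % 4096 - 1  = 3583
 * so the DMA may touch bytes 0..3583 of the last (668th) page and no
 * further.
 */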
218 static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
220 struct device *dev = &cio2->pci_dev->dev;
222 q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
230 static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
232 dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
235 /**************** CSI2 hardware setup ****************/
238 * The CSI2 receiver has several parameters affecting
239 * the receiver timings. These depend on the MIPI bus frequency
240 * F in Hz (sensor transmitter rate) as follows:
241 * register value = (A/1e9 + B * UI) / COUNT_ACC
243 * UI = 1 / (2 * F) in seconds
244 * COUNT_ACC = counter accuracy in seconds
245 * For IPU3 COUNT_ACC = 0.0625
247 * A and B are coefficients from the table below, depending on
248 * whether the register minimum or maximum value is used:
250 *                                       Min. value    Max. value
251 * Register                               A     B       A     B
252 * reg_rx_csi_dly_cnt_termen_clane        0     0      38     0
253 * reg_rx_csi_dly_cnt_settle_clane       95    -8     300   -16
255 * reg_rx_csi_dly_cnt_termen_dlane0       0     0      35     4
256 * reg_rx_csi_dly_cnt_settle_dlane0      85    -2     145    -6
257 * reg_rx_csi_dly_cnt_termen_dlane1       0     0      35     4
258 * reg_rx_csi_dly_cnt_settle_dlane1      85    -2     145    -6
259 * reg_rx_csi_dly_cnt_termen_dlane2       0     0      35     4
260 * reg_rx_csi_dly_cnt_settle_dlane2      85    -2     145    -6
261 * reg_rx_csi_dly_cnt_termen_dlane3       0     0      35     4
262 * reg_rx_csi_dly_cnt_settle_dlane3      85    -2     145    -6
264 * We use the minimum values of both A and B.
268 * Shift to keep the value range suitable for 32-bit integer arithmetic.
270 #define LIMIT_SHIFT 8
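/*
 * Worked example (editor's note, not from the original source): take the
 * clock lane settle count with the minimum coefficients from the table
 * above (A = 95, B = -8) and an illustrative link frequency F = 400 MHz:
 *     UI    = 1 / (2 * F)          = 1.25 ns
 *     value = 16 * (95 - 8 * 1.25) = 16 * 85 = 1360
 * where 16 = 1 / COUNT_ACC. cio2_rx_timing() below computes the same
 * result using accinv = 16 and uiinv / F for UI in nanoseconds.
 */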
272 static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
274 const u32 accinv = 16; /* inverse of the counter resolution */
275 const u32 uiinv = 500000000; /* 1e9 / 2 */
278 freq >>= LIMIT_SHIFT;
280 if (WARN_ON(freq <= 0 || freq > S32_MAX))
283 * b could be 0, -2 or -8, so |accinv * b| is at most 128 and
284 * thus |r| stays below 500000000, well within the s32 range.
286 r = accinv * b * (uiinv >> LIMIT_SHIFT);
288 /* max value of a is 95 */
294 /* Calculate the delay value for termination enable of clock lane HS Rx */
295 static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
296 struct cio2_csi2_timing *timing,
297 unsigned int bpp, unsigned int lanes)
299 struct device *dev = &cio2->pci_dev->dev;
305 freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
307 dev_err(dev, "error %lld, invalid link_freq\n", freq);
311 timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
312 CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
314 CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
315 timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
316 CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
318 CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
319 timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
320 CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
322 CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
323 timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
324 CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
326 CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
328 dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
329 dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
330 dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
331 dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
336 static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
338 static const int NUM_VCS = 4;
339 static const int SID; /* Stream id */
340 static const int ENTRY;
341 static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
342 CIO2_FBPT_SUBENTRY_UNIT);
343 const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
344 const struct ipu3_cio2_fmt *fmt;
345 void __iomem *const base = cio2->base;
346 u8 lanes, csi2bus = q->csi2.port;
347 u8 sensor_vc = SENSOR_VIR_CH_DFLT;
348 struct cio2_csi2_timing timing;
351 fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
355 lanes = q->csi2.lanes;
357 r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
361 writel(timing.clk_termen, q->csi_rx_base +
362 CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
363 writel(timing.clk_settle, q->csi_rx_base +
364 CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
366 for (i = 0; i < lanes; i++) {
367 writel(timing.dat_termen, q->csi_rx_base +
368 CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
369 writel(timing.dat_settle, q->csi_rx_base +
370 CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
373 writel(CIO2_PBM_WMCTRL1_MIN_2CK |
374 CIO2_PBM_WMCTRL1_MID1_2CK |
375 CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
376 writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
377 CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
378 CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
379 CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
380 CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
381 CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
382 writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
383 CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
384 CIO2_PBM_ARB_CTRL_LE_EN |
385 CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
386 CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
387 CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
388 CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
389 base + CIO2_REG_PBM_ARB_CTRL);
390 writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
391 q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
392 writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
393 q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);
395 writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
396 writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);
398 /* Configure MIPI backend */
399 for (i = 0; i < NUM_VCS; i++)
400 writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
402 /* There are 16 short packet LUT entries */
403 for (i = 0; i < 16; i++)
404 writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
405 q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
406 writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
407 q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
409 writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
410 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
411 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
412 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
413 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
414 writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);
416 writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
417 CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
418 base + CIO2_REG_INT_EN);
420 writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
421 << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
422 base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
423 writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
424 sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
425 fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
426 q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
427 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
428 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
429 writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));
431 writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
432 writel(CIO2_CGC_PRIM_TGE |
436 CIO2_CGC_CSI2_INTERFRAME_TGE |
437 CIO2_CGC_CSI2_PORT_DCGE |
442 CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
443 CIO2_CGC_CSI_CLKGATE_HOLDOFF
444 << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
445 writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
446 writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
447 CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
448 CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
449 CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
450 base + CIO2_REG_LTRVAL01);
451 writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
452 CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
453 CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
454 CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
455 base + CIO2_REG_LTRVAL23);
457 for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
458 writel(0, base + CIO2_REG_CDMABA(i));
459 writel(0, base + CIO2_REG_CDMAC0(i));
460 writel(0, base + CIO2_REG_CDMAC1(i));
464 writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
466 writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
467 FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
468 CIO2_CDMAC0_DMA_INTR_ON_FE |
469 CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
471 CIO2_CDMAC0_DMA_INTR_ON_FS |
472 CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
474 writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
475 base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));
477 writel(0, base + CIO2_REG_PBM_FOPN_ABORT);
479 writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
480 CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
481 CIO2_PXM_FRF_CFG_MSK_ECC_RE |
482 CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
483 base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));
485 /* Clear interrupts */
486 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
487 writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
488 writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
489 writel(~0, base + CIO2_REG_INT_STS);
491 /* Enable devices, starting from the last device in the pipe */
492 writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
493 writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
498 static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
500 void __iomem *const base = cio2->base;
505 /* Disable CSI receiver and MIPI backend devices */
506 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
507 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
508 writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
509 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
512 writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
513 ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
514 value, value & CIO2_CDMAC0_DMA_HALTED,
517 dev_err(&cio2->pci_dev->dev,
518 "DMA %i can not be halted\n", CIO2_DMA_CHAN);
520 for (i = 0; i < CIO2_NUM_PORTS; i++) {
521 writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
522 CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
523 writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
524 CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
528 static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
530 struct device *dev = &cio2->pci_dev->dev;
531 struct cio2_queue *q = cio2->cur_queue;
532 struct cio2_fbpt_entry *entry;
533 u64 ns = ktime_get_ns();
535 if (dma_chan >= CIO2_QUEUES) {
536 dev_err(dev, "bad DMA channel %i\n", dma_chan);
540 entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
541 if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
542 dev_warn(&cio2->pci_dev->dev,
543 "no ready buffers found on DMA channel %u\n",
548 /* Find out which buffer(s) are ready */
550 struct cio2_buffer *b;
552 b = q->bufs[q->bufs_first];
554 unsigned int received = entry[1].second_entry.num_of_bytes;
555 unsigned long payload =
556 vb2_get_plane_payload(&b->vbb.vb2_buf, 0);
558 q->bufs[q->bufs_first] = NULL;
559 atomic_dec(&q->bufs_queued);
560 dev_dbg(&cio2->pci_dev->dev,
561 "buffer %i done\n", b->vbb.vb2_buf.index);
563 b->vbb.vb2_buf.timestamp = ns;
564 b->vbb.field = V4L2_FIELD_NONE;
565 b->vbb.sequence = atomic_read(&q->frame_sequence);
566 if (payload != received)
568 "payload length is %lu, received %u\n",
570 vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
572 atomic_inc(&q->frame_sequence);
573 cio2_fbpt_entry_init_dummy(cio2, entry);
574 q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
575 entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
576 } while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
579 static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
582 * For the user space camera control algorithms it is essential
583 * to know when the reception of a frame has begun. That's often
584 * the best timing information to get from the hardware.
586 struct v4l2_event event = {
587 .type = V4L2_EVENT_FRAME_SYNC,
588 .u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
591 v4l2_event_queue(q->subdev.devnode, &event);
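/*
 * Editor's note: a minimal userspace sketch (not driver code) of how an
 * application might consume this event on the CIO2 subdev node, using
 * only the standard V4L2 event ioctls.
 */
#if 0	/* illustration only, never compiled */
#include <poll.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int wait_for_frame_start(int subdev_fd)
{
	struct v4l2_event_subscription sub = { .type = V4L2_EVENT_FRAME_SYNC };
	struct pollfd pfd = { .fd = subdev_fd, .events = POLLPRI };
	struct v4l2_event ev;

	if (ioctl(subdev_fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
		return -1;

	/* An exceptional fd condition signals a pending V4L2 event. */
	if (poll(&pfd, 1, -1) <= 0)
		return -1;

	memset(&ev, 0, sizeof(ev));
	if (ioctl(subdev_fd, VIDIOC_DQEVENT, &ev) < 0)
		return -1;

	return (int)ev.u.frame_sync.frame_sequence;
}
#endif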
594 static const char *const cio2_irq_errs[] = {
595 "single packet header error corrected",
596 "multiple packet header errors detected",
597 "payload checksum (CRC) error",
599 "reserved short packet data type detected",
600 "reserved long packet data type detected",
601 "incomplete long packet detected",
604 "DPHY start of transmission error",
605 "DPHY synchronization error",
607 "escape mode trigger event",
608 "escape mode ultra-low power state for data lane(s)",
609 "escape mode ultra-low power state exit for clock lane",
610 "inter-frame short packet discarded",
611 "inter-frame long packet discarded",
612 "non-matching Long Packet stalled",
615 static const char *const cio2_port_errs[] = {
617 "DPHY not recoverable",
618 "ECC not recoverable",
625 static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
627 void __iomem *const base = cio2->base;
628 struct device *dev = &cio2->pci_dev->dev;
630 if (int_status & CIO2_INT_IOOE) {
632 * Interrupt on Output Error:
633 * 1) SRAM is full and FS received, or
634 * 2) An invalid bit detected by DMA.
636 u32 oe_status, oe_clear;
638 oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
639 oe_status = oe_clear;
641 if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
642 dev_err(dev, "DMA output error: 0x%x\n",
643 (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
644 >> CIO2_INT_EXT_OE_DMAOE_SHIFT);
645 oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
647 if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
648 dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
649 (oe_status & CIO2_INT_EXT_OE_OES_MASK)
650 >> CIO2_INT_EXT_OE_OES_SHIFT);
651 oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
653 writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
655 dev_warn(dev, "unknown interrupt 0x%x on OE\n",
657 int_status &= ~CIO2_INT_IOOE;
660 if (int_status & CIO2_INT_IOC_MASK) {
661 /* DMA IO done -- frame ready */
665 for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
666 if (int_status & CIO2_INT_IOC(d)) {
667 clr |= CIO2_INT_IOC(d);
668 cio2_buffer_done(cio2, d);
673 if (int_status & CIO2_INT_IOS_IOLN_MASK) {
674 /* DMA IO started or reached the specified line */
678 for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
679 if (int_status & CIO2_INT_IOS_IOLN(d)) {
680 clr |= CIO2_INT_IOS_IOLN(d);
681 if (d == CIO2_DMA_CHAN)
682 cio2_queue_event_sof(cio2,
688 if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
689 /* CSI2 receiver (error) interrupt */
690 u32 ie_status, ie_clear;
693 ie_clear = readl(base + CIO2_REG_INT_STS_EXT_IE);
694 ie_status = ie_clear;
696 for (port = 0; port < CIO2_NUM_PORTS; port++) {
697 u32 port_status = (ie_status >> (port * 8)) & 0xff;
698 u32 err_mask = BIT_MASK(ARRAY_SIZE(cio2_port_errs)) - 1;
699 void __iomem *const csi_rx_base =
700 base + CIO2_REG_PIPE_BASE(port);
703 while (port_status & err_mask) {
704 i = ffs(port_status) - 1;
705 dev_err(dev, "port %i error %s\n",
706 port, cio2_port_errs[i]);
707 ie_status &= ~BIT(port * 8 + i);
708 port_status &= ~BIT(i);
711 if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
712 u32 csi2_status, csi2_clear;
714 csi2_status = readl(csi_rx_base +
715 CIO2_REG_IRQCTRL_STATUS);
716 csi2_clear = csi2_status;
718 BIT_MASK(ARRAY_SIZE(cio2_irq_errs)) - 1;
720 while (csi2_status & err_mask) {
721 i = ffs(csi2_status) - 1;
723 "CSI-2 receiver port %i: %s\n",
724 port, cio2_irq_errs[i]);
725 csi2_status &= ~BIT(i);
729 csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
732 "unknown CSI2 error 0x%x on port %i\n",
735 ie_status &= ~CIO2_INT_EXT_IE_IRQ(port);
739 writel(ie_clear, base + CIO2_REG_INT_STS_EXT_IE);
741 dev_warn(dev, "unknown interrupt 0x%x on IE\n",
744 int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
748 dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
751 static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
753 struct cio2_device *cio2 = cio2_ptr;
754 void __iomem *const base = cio2->base;
755 struct device *dev = &cio2->pci_dev->dev;
758 int_status = readl(base + CIO2_REG_INT_STS);
759 dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
764 writel(int_status, base + CIO2_REG_INT_STS);
765 cio2_irq_handle_once(cio2, int_status);
766 int_status = readl(base + CIO2_REG_INT_STS);
768 dev_dbg(dev, "pending status 0x%x\n", int_status);
769 } while (int_status);
774 /**************** Videobuf2 interface ****************/
776 static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
777 enum vb2_buffer_state state)
781 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
783 atomic_dec(&q->bufs_queued);
784 vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
791 static int cio2_vb2_queue_setup(struct vb2_queue *vq,
792 unsigned int *num_buffers,
793 unsigned int *num_planes,
794 unsigned int sizes[],
795 struct device *alloc_devs[])
797 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
798 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
801 *num_planes = q->format.num_planes;
803 for (i = 0; i < *num_planes; ++i) {
804 sizes[i] = q->format.plane_fmt[i].sizeimage;
805 alloc_devs[i] = &cio2->pci_dev->dev;
808 *num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
810 /* Initialize buffer queue */
811 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
813 cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
815 atomic_set(&q->bufs_queued, 0);
822 /* Called after each buffer is allocated */
823 static int cio2_vb2_buf_init(struct vb2_buffer *vb)
825 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
826 struct device *dev = &cio2->pci_dev->dev;
827 struct cio2_buffer *b =
828 container_of(vb, struct cio2_buffer, vbb.vb2_buf);
829 unsigned int pages = PFN_UP(vb->planes[0].length);
830 unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
832 struct sg_dma_page_iter sg_iter;
835 if (lops <= 0 || lops > CIO2_MAX_LOPS) {
836 dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
837 vb->planes[0].length);
838 return -ENOSPC; /* Should never happen */
841 memset(b->lop, 0, sizeof(b->lop));
842 /* Allocate LOP table */
843 for (i = 0; i < lops; i++) {
844 b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
845 &b->lop_bus_addr[i], GFP_KERNEL);
851 sg = vb2_dma_sg_plane_desc(vb, 0);
855 if (sg->nents && sg->sgl)
856 b->offset = sg->sgl->offset;
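/*
 * Fill the LOP tables with the bus addresses of the buffer pages; the
 * first entry past the end of the buffer is pointed at the dummy page
 * so the DMA never dereferences an uninitialized pointer.
 */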
859 for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
862 b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
864 if (j == CIO2_LOP_ENTRIES) {
870 b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
874 dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
878 /* Transfer buffer ownership to cio2 */
879 static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
881 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
882 struct cio2_queue *q =
883 container_of(vb->vb2_queue, struct cio2_queue, vbq);
884 struct cio2_buffer *b =
885 container_of(vb, struct cio2_buffer, vbb.vb2_buf);
886 struct cio2_fbpt_entry *entry;
888 unsigned int i, j, next = q->bufs_next;
889 int bufs_queued = atomic_inc_return(&q->bufs_queued);
892 dev_dbg(&cio2->pci_dev->dev, "queue buffer %d\n", vb->index);
895 * This code queues the buffer to the CIO2 DMA engine, which starts
896 * running once streaming has started. It is possible that this code
897 * gets preempted due to increased CPU load. In that case, the driver
898 * does not get an opportunity to queue new buffers to the CIO2 DMA
899 * engine. When the DMA engine encounters an FBPT entry without the
900 * VALID bit set, the DMA engine halts, which requires a restart of
901 * the DMA engine and the sensor to continue streaming.
902 * This is undesirable but highly unlikely, given that the DMA engine
903 * would have to process 32 FBPT entries before running into one
904 * without the VALID bit set. We try to mitigate this by disabling
905 * interrupts for the duration of this queueing.
907 local_irq_save(flags);
909 fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
910 >> CIO2_CDMARI_FBPT_RP_SHIFT)
911 & CIO2_CDMARI_FBPT_RP_MASK;
914 * fbpt_rp is the FBPT entry that the DMA is currently working
915 * on, but since it could move on to the next entry at any time,
916 * assume that we might already be there.
918 fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
920 if (bufs_queued <= 1 || fbpt_rp == next)
921 /* Buffers were drained */
922 next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
924 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
926 * We have allocated CIO2_MAX_BUFFERS circularly for the hw,
927 * while the user has requested a queue of N buffers. The driver
928 * ensures N <= CIO2_MAX_BUFFERS and guarantees that whenever the
929 * user queues a buffer, there necessarily is a free entry.
931 if (!q->bufs[next]) {
933 entry = &q->fbpt[next * CIO2_MAX_LOPS];
934 cio2_fbpt_entry_init_buf(cio2, b, entry);
935 local_irq_restore(flags);
936 q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
937 for (j = 0; j < vb->num_planes; j++)
938 vb2_set_plane_payload(vb, j,
939 q->format.plane_fmt[j].sizeimage);
943 dev_dbg(&cio2->pci_dev->dev, "entry %i was full!\n", next);
944 next = (next + 1) % CIO2_MAX_BUFFERS;
947 local_irq_restore(flags);
948 dev_err(&cio2->pci_dev->dev, "error: all cio2 entries were full!\n");
949 atomic_dec(&q->bufs_queued);
950 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
953 /* Called when each buffer is freed */
954 static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
956 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
957 struct cio2_buffer *b =
958 container_of(vb, struct cio2_buffer, vbb.vb2_buf);
962 for (i = 0; i < CIO2_MAX_LOPS; i++) {
964 dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
965 b->lop[i], b->lop_bus_addr[i]);
969 static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
971 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
972 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
976 atomic_set(&q->frame_sequence, 0);
978 r = pm_runtime_get_sync(&cio2->pci_dev->dev);
980 dev_info(&cio2->pci_dev->dev, "failed to set power %d\n", r);
981 pm_runtime_put_noidle(&cio2->pci_dev->dev);
985 r = media_pipeline_start(&q->vdev.entity, &q->pipe);
989 r = cio2_hw_init(cio2, q);
993 /* Start streaming on sensor */
994 r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
996 goto fail_csi2_subdev;
998 cio2->streaming = true;
1003 cio2_hw_exit(cio2, q);
1005 media_pipeline_stop(&q->vdev.entity);
1007 dev_dbg(&cio2->pci_dev->dev, "failed to start streaming (%d)\n", r);
1008 cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
1009 pm_runtime_put(&cio2->pci_dev->dev);
1014 static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
1016 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
1017 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
1019 if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
1020 dev_err(&cio2->pci_dev->dev,
1021 "failed to stop sensor streaming\n");
1023 cio2_hw_exit(cio2, q);
1024 synchronize_irq(cio2->pci_dev->irq);
1025 cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
1026 media_pipeline_stop(&q->vdev.entity);
1027 pm_runtime_put(&cio2->pci_dev->dev);
1028 cio2->streaming = false;
1031 static const struct vb2_ops cio2_vb2_ops = {
1032 .buf_init = cio2_vb2_buf_init,
1033 .buf_queue = cio2_vb2_buf_queue,
1034 .buf_cleanup = cio2_vb2_buf_cleanup,
1035 .queue_setup = cio2_vb2_queue_setup,
1036 .start_streaming = cio2_vb2_start_streaming,
1037 .stop_streaming = cio2_vb2_stop_streaming,
1038 .wait_prepare = vb2_ops_wait_prepare,
1039 .wait_finish = vb2_ops_wait_finish,
1042 /**************** V4L2 interface ****************/
1044 static int cio2_v4l2_querycap(struct file *file, void *fh,
1045 struct v4l2_capability *cap)
1047 struct cio2_device *cio2 = video_drvdata(file);
1049 strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
1050 strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
1051 snprintf(cap->bus_info, sizeof(cap->bus_info),
1052 "PCI:%s", pci_name(cio2->pci_dev));
1057 static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
1058 struct v4l2_fmtdesc *f)
1060 if (f->index >= ARRAY_SIZE(formats))
1063 f->pixelformat = formats[f->index].fourcc;
1068 /* The format is validated in cio2_video_link_validate() */
1069 static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
1071 struct cio2_queue *q = file_to_cio2_queue(file);
1073 f->fmt.pix_mp = q->format;
1078 static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
1080 const struct ipu3_cio2_fmt *fmt;
1081 struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;
1083 fmt = cio2_find_format(&mpix->pixelformat, NULL);
1087 /* Only supports up to 4224x3136 */
1088 if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
1089 mpix->width = CIO2_IMAGE_MAX_WIDTH;
1090 if (mpix->height > CIO2_IMAGE_MAX_HEIGHT)
1091 mpix->height = CIO2_IMAGE_MAX_HEIGHT;
1093 mpix->num_planes = 1;
1094 mpix->pixelformat = fmt->fourcc;
1095 mpix->colorspace = V4L2_COLORSPACE_RAW;
1096 mpix->field = V4L2_FIELD_NONE;
1097 mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
1098 mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
1102 mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
1103 mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
1104 mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
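/*
 * Worked example (editor's note): for the 1936x1096 default resolution,
 * bytesperline = cio2_bytesperline(1936) = 2496 and
 * sizeimage = 2496 * 1096 = 2735616 bytes for the single plane.
 */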
1109 static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
1111 struct cio2_queue *q = file_to_cio2_queue(file);
1113 cio2_v4l2_try_fmt(file, fh, f);
1114 q->format = f->fmt.pix_mp;
1120 cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1122 if (input->index > 0)
1125 strscpy(input->name, "camera", sizeof(input->name));
1126 input->type = V4L2_INPUT_TYPE_CAMERA;
1132 cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
1140 cio2_video_s_input(struct file *file, void *fh, unsigned int input)
1142 return input == 0 ? 0 : -EINVAL;
1145 static const struct v4l2_file_operations cio2_v4l2_fops = {
1146 .owner = THIS_MODULE,
1147 .unlocked_ioctl = video_ioctl2,
1148 .open = v4l2_fh_open,
1149 .release = vb2_fop_release,
1150 .poll = vb2_fop_poll,
1151 .mmap = vb2_fop_mmap,
1154 static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
1155 .vidioc_querycap = cio2_v4l2_querycap,
1156 .vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
1157 .vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
1158 .vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
1159 .vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
1160 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1161 .vidioc_create_bufs = vb2_ioctl_create_bufs,
1162 .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1163 .vidioc_querybuf = vb2_ioctl_querybuf,
1164 .vidioc_qbuf = vb2_ioctl_qbuf,
1165 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1166 .vidioc_streamon = vb2_ioctl_streamon,
1167 .vidioc_streamoff = vb2_ioctl_streamoff,
1168 .vidioc_expbuf = vb2_ioctl_expbuf,
1169 .vidioc_enum_input = cio2_video_enum_input,
1170 .vidioc_g_input = cio2_video_g_input,
1171 .vidioc_s_input = cio2_video_s_input,
1174 static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
1176 struct v4l2_event_subscription *sub)
1178 if (sub->type != V4L2_EVENT_FRAME_SYNC)
1181 /* Line number. For now only zero accepted. */
1185 return v4l2_event_subscribe(fh, sub, 0, NULL);
1188 static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1190 struct v4l2_mbus_framefmt *format;
1191 const struct v4l2_mbus_framefmt fmt_default = {
1194 .code = formats[0].mbus_code,
1195 .field = V4L2_FIELD_NONE,
1196 .colorspace = V4L2_COLORSPACE_RAW,
1197 .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
1198 .quantization = V4L2_QUANTIZATION_DEFAULT,
1199 .xfer_func = V4L2_XFER_FUNC_DEFAULT,
1202 /* Initialize try_fmt */
1203 format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SINK);
1204 *format = fmt_default;
1207 format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SOURCE);
1208 *format = fmt_default;
1214 * cio2_subdev_get_fmt - Handle get format by pads subdev method
1215 * @sd: pointer to v4l2 subdev structure
1216 * @cfg: V4L2 subdev pad config
1217 * @fmt: pointer to v4l2 subdev format structure
1218 * Return: -EINVAL or zero on success
1220 static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
1221 struct v4l2_subdev_pad_config *cfg,
1222 struct v4l2_subdev_format *fmt)
1224 struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1226 mutex_lock(&q->subdev_lock);
1228 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1229 fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
1231 fmt->format = q->subdev_fmt;
1233 mutex_unlock(&q->subdev_lock);
1239 * cio2_subdev_set_fmt - Handle set format by pads subdev method
1240 * @sd: pointer to v4l2 subdev structure
1241 * @cfg: V4L2 subdev pad config
1242 * @fmt: pointer to v4l2 subdev format structure
1243 * Return: -EINVAL or zero on success
1245 static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
1246 struct v4l2_subdev_pad_config *cfg,
1247 struct v4l2_subdev_format *fmt)
1249 struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1250 struct v4l2_mbus_framefmt *mbus;
1251 u32 mbus_code = fmt->format.code;
1255 * Only allow setting sink pad format;
1256 * source always propagates from sink
1258 if (fmt->pad == CIO2_PAD_SOURCE)
1259 return cio2_subdev_get_fmt(sd, cfg, fmt);
1261 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1262 mbus = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
1264 mbus = &q->subdev_fmt;
1266 fmt->format.code = formats[0].mbus_code;
1268 for (i = 0; i < ARRAY_SIZE(formats); i++) {
1269 if (formats[i].mbus_code == mbus_code) {
1270 fmt->format.code = mbus_code;
1275 fmt->format.width = min(fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
1276 fmt->format.height = min(fmt->format.height, CIO2_IMAGE_MAX_HEIGHT);
1277 fmt->format.field = V4L2_FIELD_NONE;
1279 mutex_lock(&q->subdev_lock);
1280 *mbus = fmt->format;
1281 mutex_unlock(&q->subdev_lock);
1286 static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
1287 struct v4l2_subdev_pad_config *cfg,
1288 struct v4l2_subdev_mbus_code_enum *code)
1290 if (code->index >= ARRAY_SIZE(formats))
1293 code->code = formats[code->index].mbus_code;
1297 static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
1298 struct v4l2_subdev_format *fmt)
1300 if (is_media_entity_v4l2_subdev(pad->entity)) {
1301 struct v4l2_subdev *sd =
1302 media_entity_to_v4l2_subdev(pad->entity);
1304 fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
1305 fmt->pad = pad->index;
1306 return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
1312 static int cio2_video_link_validate(struct media_link *link)
1314 struct video_device *vd = container_of(link->sink->entity,
1315 struct video_device, entity);
1316 struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
1317 struct cio2_device *cio2 = video_get_drvdata(vd);
1318 struct v4l2_subdev_format source_fmt;
1321 if (!media_entity_remote_pad(link->sink->entity->pads)) {
1322 dev_info(&cio2->pci_dev->dev,
1323 "video node %s pad not connected\n", vd->name);
1327 ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
1331 if (source_fmt.format.width != q->format.width ||
1332 source_fmt.format.height != q->format.height) {
1333 dev_err(&cio2->pci_dev->dev,
1334 "Wrong width or height %ux%u (%ux%u expected)\n",
1335 q->format.width, q->format.height,
1336 source_fmt.format.width, source_fmt.format.height);
1340 if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
1346 static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
1347 .subscribe_event = cio2_subdev_subscribe_event,
1348 .unsubscribe_event = v4l2_event_subdev_unsubscribe,
1351 static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
1352 .open = cio2_subdev_open,
1355 static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
1356 .link_validate = v4l2_subdev_link_validate_default,
1357 .get_fmt = cio2_subdev_get_fmt,
1358 .set_fmt = cio2_subdev_set_fmt,
1359 .enum_mbus_code = cio2_subdev_enum_mbus_code,
1362 static const struct v4l2_subdev_ops cio2_subdev_ops = {
1363 .core = &cio2_subdev_core_ops,
1364 .pad = &cio2_subdev_pad_ops,
1367 /******* V4L2 sub-device asynchronous registration callbacks***********/
1369 struct sensor_async_subdev {
1370 struct v4l2_async_subdev asd;
1371 struct csi2_bus_info csi2;
1374 /* The .bound() notifier callback when a match is found */
1375 static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
1376 struct v4l2_subdev *sd,
1377 struct v4l2_async_subdev *asd)
1379 struct cio2_device *cio2 = container_of(notifier,
1380 struct cio2_device, notifier);
1381 struct sensor_async_subdev *s_asd = container_of(asd,
1382 struct sensor_async_subdev, asd);
1383 struct cio2_queue *q;
1385 if (cio2->queue[s_asd->csi2.port].sensor)
1388 q = &cio2->queue[s_asd->csi2.port];
1390 q->csi2 = s_asd->csi2;
1392 q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
1397 /* The .unbind callback */
1398 static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
1399 struct v4l2_subdev *sd,
1400 struct v4l2_async_subdev *asd)
1402 struct cio2_device *cio2 = container_of(notifier,
1403 struct cio2_device, notifier);
1404 struct sensor_async_subdev *s_asd = container_of(asd,
1405 struct sensor_async_subdev, asd);
1407 cio2->queue[s_asd->csi2.port].sensor = NULL;
1410 /* .complete() is called after all subdevices have been located */
1411 static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
1413 struct cio2_device *cio2 = container_of(notifier, struct cio2_device,
1415 struct sensor_async_subdev *s_asd;
1416 struct v4l2_async_subdev *asd;
1417 struct cio2_queue *q;
1421 list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) {
1422 s_asd = container_of(asd, struct sensor_async_subdev, asd);
1423 q = &cio2->queue[s_asd->csi2.port];
1425 for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
1426 if (q->sensor->entity.pads[pad].flags &
1427 MEDIA_PAD_FL_SOURCE)
1430 if (pad == q->sensor->entity.num_pads) {
1431 dev_err(&cio2->pci_dev->dev,
1432 "failed to find src pad for %s\n",
1437 ret = media_create_pad_link(
1438 &q->sensor->entity, pad,
1439 &q->subdev.entity, CIO2_PAD_SINK,
1442 dev_err(&cio2->pci_dev->dev,
1443 "failed to create link for %s\n",
1449 return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
1452 static const struct v4l2_async_notifier_operations cio2_async_ops = {
1453 .bound = cio2_notifier_bound,
1454 .unbind = cio2_notifier_unbind,
1455 .complete = cio2_notifier_complete,
1458 static int cio2_parse_firmware(struct cio2_device *cio2)
1463 for (i = 0; i < CIO2_NUM_PORTS; i++) {
1464 struct v4l2_fwnode_endpoint vep = {
1465 .bus_type = V4L2_MBUS_CSI2_DPHY
1467 struct sensor_async_subdev *s_asd;
1468 struct fwnode_handle *ep;
1470 ep = fwnode_graph_get_endpoint_by_id(
1471 dev_fwnode(&cio2->pci_dev->dev), i, 0,
1472 FWNODE_GRAPH_ENDPOINT_NEXT);
1477 ret = v4l2_fwnode_endpoint_parse(ep, &vep);
1481 s_asd = v4l2_async_notifier_add_fwnode_remote_subdev(
1482 &cio2->notifier, ep, struct sensor_async_subdev);
1483 if (IS_ERR(s_asd)) {
1484 ret = PTR_ERR(s_asd);
1488 s_asd->csi2.port = vep.base.port;
1489 s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
1491 fwnode_handle_put(ep);
1496 fwnode_handle_put(ep);
1501 * Proceed even without sensors connected to allow the device to
1502 * suspend.
1504 cio2->notifier.ops = &cio2_async_ops;
1505 ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
1507 dev_err(&cio2->pci_dev->dev,
1508 "failed to register async notifier : %d\n", ret);
1513 /**************** Queue initialization ****************/
1514 static const struct media_entity_operations cio2_media_ops = {
1515 .link_validate = v4l2_subdev_link_validate,
1518 static const struct media_entity_operations cio2_video_entity_ops = {
1519 .link_validate = cio2_video_link_validate,
1522 static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
1524 static const u32 default_width = 1936;
1525 static const u32 default_height = 1096;
1526 const struct ipu3_cio2_fmt dflt_fmt = formats[0];
1528 struct video_device *vdev = &q->vdev;
1529 struct vb2_queue *vbq = &q->vbq;
1530 struct v4l2_subdev *subdev = &q->subdev;
1531 struct v4l2_mbus_framefmt *fmt;
1534 /* Initialize miscellaneous variables */
1535 mutex_init(&q->lock);
1536 mutex_init(&q->subdev_lock);
1538 /* Initialize formats to default values */
1539 fmt = &q->subdev_fmt;
1540 fmt->width = default_width;
1541 fmt->height = default_height;
1542 fmt->code = dflt_fmt.mbus_code;
1543 fmt->field = V4L2_FIELD_NONE;
1545 q->format.width = default_width;
1546 q->format.height = default_height;
1547 q->format.pixelformat = dflt_fmt.fourcc;
1548 q->format.colorspace = V4L2_COLORSPACE_RAW;
1549 q->format.field = V4L2_FIELD_NONE;
1550 q->format.num_planes = 1;
1551 q->format.plane_fmt[0].bytesperline =
1552 cio2_bytesperline(q->format.width);
1553 q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
1556 /* Initialize fbpt */
1557 r = cio2_fbpt_init(cio2, q);
1561 /* Initialize media entities */
1562 q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
1563 MEDIA_PAD_FL_MUST_CONNECT;
1564 q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
1565 subdev->entity.ops = &cio2_media_ops;
1566 subdev->internal_ops = &cio2_subdev_internal_ops;
1567 r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
1569 dev_err(&cio2->pci_dev->dev,
1570 "failed initialize subdev media entity (%d)\n", r);
1571 goto fail_subdev_media_entity;
1574 q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
1575 vdev->entity.ops = &cio2_video_entity_ops;
1576 r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
1578 dev_err(&cio2->pci_dev->dev,
1579 "failed initialize videodev media entity (%d)\n", r);
1580 goto fail_vdev_media_entity;
1583 /* Initialize subdev */
1584 v4l2_subdev_init(subdev, &cio2_subdev_ops);
1585 subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
1586 subdev->owner = THIS_MODULE;
1587 snprintf(subdev->name, sizeof(subdev->name),
1588 CIO2_ENTITY_NAME " %td", q - cio2->queue);
1589 subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
1590 v4l2_set_subdevdata(subdev, cio2);
1591 r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
1593 dev_err(&cio2->pci_dev->dev,
1594 "failed initialize subdev (%d)\n", r);
1598 /* Initialize vbq */
1599 vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1600 vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
1601 vbq->ops = &cio2_vb2_ops;
1602 vbq->mem_ops = &vb2_dma_sg_memops;
1603 vbq->buf_struct_size = sizeof(struct cio2_buffer);
1604 vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1605 vbq->min_buffers_needed = 1;
1606 vbq->drv_priv = cio2;
1607 vbq->lock = &q->lock;
1608 r = vb2_queue_init(vbq);
1610 dev_err(&cio2->pci_dev->dev,
1611 "failed to initialize videobuf2 queue (%d)\n", r);
1615 /* Initialize vdev */
1616 snprintf(vdev->name, sizeof(vdev->name),
1617 "%s %td", CIO2_NAME, q - cio2->queue);
1618 vdev->release = video_device_release_empty;
1619 vdev->fops = &cio2_v4l2_fops;
1620 vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
1621 vdev->lock = &cio2->lock;
1622 vdev->v4l2_dev = &cio2->v4l2_dev;
1623 vdev->queue = &q->vbq;
1624 vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
1625 video_set_drvdata(vdev, cio2);
1626 r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
1628 dev_err(&cio2->pci_dev->dev,
1629 "failed to register video device (%d)\n", r);
1633 /* Create link from CIO2 subdev to output node */
1634 r = media_create_pad_link(
1635 &subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
1636 MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
1643 vb2_video_unregister_device(&q->vdev);
1645 v4l2_device_unregister_subdev(subdev);
1647 media_entity_cleanup(&vdev->entity);
1648 fail_vdev_media_entity:
1649 media_entity_cleanup(&subdev->entity);
1650 fail_subdev_media_entity:
1651 cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1653 mutex_destroy(&q->subdev_lock);
1654 mutex_destroy(&q->lock);
1659 static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
1661 vb2_video_unregister_device(&q->vdev);
1662 media_entity_cleanup(&q->vdev.entity);
1663 v4l2_device_unregister_subdev(&q->subdev);
1664 media_entity_cleanup(&q->subdev.entity);
1665 cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1666 mutex_destroy(&q->subdev_lock);
1667 mutex_destroy(&q->lock);
1670 static int cio2_queues_init(struct cio2_device *cio2)
1674 for (i = 0; i < CIO2_QUEUES; i++) {
1675 r = cio2_queue_init(cio2, &cio2->queue[i]);
1680 if (i == CIO2_QUEUES)
1683 for (i--; i >= 0; i--)
1684 cio2_queue_exit(cio2, &cio2->queue[i]);
1689 static void cio2_queues_exit(struct cio2_device *cio2)
1693 for (i = 0; i < CIO2_QUEUES; i++)
1694 cio2_queue_exit(cio2, &cio2->queue[i]);
1697 static int cio2_check_fwnode_graph(struct fwnode_handle *fwnode)
1699 struct fwnode_handle *endpoint;
1701 if (IS_ERR_OR_NULL(fwnode))
1704 endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
1706 fwnode_handle_put(endpoint);
1710 return cio2_check_fwnode_graph(fwnode->secondary);
1713 /**************** PCI interface ****************/
1715 static int cio2_pci_probe(struct pci_dev *pci_dev,
1716 const struct pci_device_id *id)
1718 struct fwnode_handle *fwnode = dev_fwnode(&pci_dev->dev);
1719 struct cio2_device *cio2;
1722 cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL);
1725 cio2->pci_dev = pci_dev;
1728 * On some platforms no connections to sensors are defined in firmware;
1729 * if the device has no endpoints then we can try to build them as
1730 * software_nodes parsed from SSDB.
1732 r = cio2_check_fwnode_graph(fwnode);
1734 if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) {
1735 dev_err(&pci_dev->dev, "fwnode graph has no endpoints connected\n");
1739 r = cio2_bridge_init(pci_dev);
1744 r = pcim_enable_device(pci_dev);
1746 dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
1750 dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
1751 pci_dev->device, pci_dev->revision);
1753 r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
1755 dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
1759 cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
1761 pci_set_drvdata(pci_dev, cio2);
1763 pci_set_master(pci_dev);
1765 r = pci_set_dma_mask(pci_dev, CIO2_DMA_MASK);
1767 dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
1771 r = pci_enable_msi(pci_dev);
1773 dev_err(&pci_dev->dev, "failed to enable MSI (%d)\n", r);
1777 r = cio2_fbpt_init_dummy(cio2);
1781 mutex_init(&cio2->lock);
1783 cio2->media_dev.dev = &cio2->pci_dev->dev;
1784 strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
1785 sizeof(cio2->media_dev.model));
1786 snprintf(cio2->media_dev.bus_info, sizeof(cio2->media_dev.bus_info),
1787 "PCI:%s", pci_name(cio2->pci_dev));
1788 cio2->media_dev.hw_revision = 0;
1790 media_device_init(&cio2->media_dev);
1791 r = media_device_register(&cio2->media_dev);
1793 goto fail_mutex_destroy;
1795 cio2->v4l2_dev.mdev = &cio2->media_dev;
1796 r = v4l2_device_register(&pci_dev->dev, &cio2->v4l2_dev);
1798 dev_err(&pci_dev->dev,
1799 "failed to register V4L2 device (%d)\n", r);
1800 goto fail_media_device_unregister;
1803 r = cio2_queues_init(cio2);
1805 goto fail_v4l2_device_unregister;
1807 v4l2_async_notifier_init(&cio2->notifier);
1809 /* Register notifier for the subdevices we care about */
1810 r = cio2_parse_firmware(cio2);
1812 goto fail_clean_notifier;
1814 r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq,
1815 IRQF_SHARED, CIO2_NAME, cio2);
1817 dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
1818 goto fail_clean_notifier;
1821 pm_runtime_put_noidle(&pci_dev->dev);
1822 pm_runtime_allow(&pci_dev->dev);
1826 fail_clean_notifier:
1827 v4l2_async_notifier_unregister(&cio2->notifier);
1828 v4l2_async_notifier_cleanup(&cio2->notifier);
1829 cio2_queues_exit(cio2);
1830 fail_v4l2_device_unregister:
1831 v4l2_device_unregister(&cio2->v4l2_dev);
1832 fail_media_device_unregister:
1833 media_device_unregister(&cio2->media_dev);
1834 media_device_cleanup(&cio2->media_dev);
1836 mutex_destroy(&cio2->lock);
1837 cio2_fbpt_exit_dummy(cio2);
1842 static void cio2_pci_remove(struct pci_dev *pci_dev)
1844 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1846 media_device_unregister(&cio2->media_dev);
1847 v4l2_async_notifier_unregister(&cio2->notifier);
1848 v4l2_async_notifier_cleanup(&cio2->notifier);
1849 cio2_queues_exit(cio2);
1850 cio2_fbpt_exit_dummy(cio2);
1851 v4l2_device_unregister(&cio2->v4l2_dev);
1852 media_device_cleanup(&cio2->media_dev);
1853 mutex_destroy(&cio2->lock);
1856 static int __maybe_unused cio2_runtime_suspend(struct device *dev)
1858 struct pci_dev *pci_dev = to_pci_dev(dev);
1859 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1860 void __iomem *const base = cio2->base;
1863 writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
1864 dev_dbg(dev, "cio2 runtime suspend.\n");
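/*
 * Clear the PMCSR power-state field (the bits below
 * CIO2_PMCSR_D0D3_SHIFT are dropped by the shift down and back up)
 * and then request the D3 state.
 */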
1866 pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1867 pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1868 pm |= CIO2_PMCSR_D3;
1869 pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1874 static int __maybe_unused cio2_runtime_resume(struct device *dev)
1876 struct pci_dev *pci_dev = to_pci_dev(dev);
1877 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1878 void __iomem *const base = cio2->base;
1881 writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
1882 dev_dbg(dev, "cio2 runtime resume.\n");
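/*
 * Clear the PMCSR power-state field, i.e. put the device back into D0;
 * nothing is OR-ed back in on resume.
 */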
1884 pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1885 pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1886 pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1892 * Helper function to advance all the elements of a circular buffer by
1893 * "start" positions.
1895 static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
1901 { start, elems - 1 },
1904 #define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)
1906 /* Loop as long as we have out-of-place entries */
1907 while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
1911 * Find the number of entries that can be arranged in this pass.
1914 size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));
1916 /* Swap the entries in two parts of the array. */
1917 for (i = 0; i < size0; i++) {
1918 u8 *d = ptr + elem_size * (arr[1].begin + i);
1919 u8 *s = ptr + elem_size * (arr[0].begin + i);
1922 for (j = 0; j < elem_size; j++)
1926 if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
1927 /* The end of the first array remains unarranged. */
1928 arr[0].begin += size0;
1931 * The first array is fully arranged so we proceed
1932 * handling the next one.
1934 arr[0].begin = arr[1].begin;
1935 arr[0].end = arr[1].begin + size0 - 1;
1936 arr[1].begin += size0;
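/*
 * Editor's note, an illustration of what arrange() achieves: advancing a
 * five-element circular buffer by start = 2 turns
 *     { b0, b1, b2, b3, b4 }  into  { b2, b3, b4, b0, b1 },
 * i.e. the element that was at index "start" ends up at index 0.
 */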
1941 static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
1945 for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
1946 i++, j = (j + 1) % CIO2_MAX_BUFFERS)
1950 if (i == CIO2_MAX_BUFFERS)
1954 arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
1955 CIO2_MAX_BUFFERS, j);
1956 arrange(q->bufs, sizeof(struct cio2_buffer *),
1957 CIO2_MAX_BUFFERS, j);
1961 * The DMA clears the valid bit when accessing a buffer.
1962 * When stopping the stream in the suspend callback, some of the buffers
1963 * may be left in an invalid state. After resume, when the DMA meets an
1964 * invalid buffer, it will halt and stop receiving new data.
1965 * To avoid the DMA halting, set the valid bit for all buffers in the FBPT.
1967 for (i = 0; i < CIO2_MAX_BUFFERS; i++)
1968 cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
1971 static int __maybe_unused cio2_suspend(struct device *dev)
1973 struct pci_dev *pci_dev = to_pci_dev(dev);
1974 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1975 struct cio2_queue *q = cio2->cur_queue;
1977 dev_dbg(dev, "cio2 suspend\n");
1978 if (!cio2->streaming)
1982 cio2_hw_exit(cio2, q);
1983 synchronize_irq(pci_dev->irq);
1985 pm_runtime_force_suspend(dev);
1988 * Upon resume, the hw starts processing the FBPT entries from the
1989 * beginning, so relocate the queued buffers to the FBPT head before suspend.
1991 cio2_fbpt_rearrange(cio2, q);
1998 static int __maybe_unused cio2_resume(struct device *dev)
2000 struct cio2_device *cio2 = dev_get_drvdata(dev);
2001 struct cio2_queue *q = cio2->cur_queue;
2004 dev_dbg(dev, "cio2 resume\n");
2005 if (!cio2->streaming)
2008 r = pm_runtime_force_resume(&cio2->pci_dev->dev);
2010 dev_err(&cio2->pci_dev->dev,
2011 "failed to set power %d\n", r);
2015 r = cio2_hw_init(cio2, q);
2017 dev_err(dev, "fail to init cio2 hw\n");
2022 static const struct dev_pm_ops cio2_pm_ops = {
2023 SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
2024 SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
2027 static const struct pci_device_id cio2_pci_id_table[] = {
2028 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
2032 MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
2034 static struct pci_driver cio2_pci_driver = {
2036 .id_table = cio2_pci_id_table,
2037 .probe = cio2_pci_probe,
2038 .remove = cio2_pci_remove,
2044 module_pci_driver(cio2_pci_driver);
2048 MODULE_AUTHOR("Jian Xu Zheng");
2051 MODULE_LICENSE("GPL v2");
2052 MODULE_DESCRIPTION("IPU3 CIO2 driver");