// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Intel Corporation
 *
 * Based partially on Intel IPU4 driver written by
 *  Sakari Ailus <[email protected]>
 *  Samu Onkalo <[email protected]>
 *  Jouni Högander <[email protected]>
 *  Jouni Ukkonen <[email protected]>
 *  Antti Laakso <[email protected]>
 * et al.
 *
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/vmalloc.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-sg.h>

#include "ipu3-cio2.h"

struct ipu3_cio2_fmt {
	u32 mbus_code;
	u32 fourcc;
	u8 mipicode;
};

/*
 * These are raw formats used in Intel's third generation of
 * Image Processing Unit known as IPU3.
 * 10bit raw bayer packed, 32 bytes for every 25 pixels,
 * last LSB 6 bits unused.
 */
static const struct ipu3_cio2_fmt formats[] = {
	{	/* put default entry at beginning */
		.mbus_code	= MEDIA_BUS_FMT_SGRBG10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SGRBG10,
		.mipicode	= 0x2b,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SGBRG10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SGBRG10,
		.mipicode	= 0x2b,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SBGGR10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SBGGR10,
		.mipicode	= 0x2b,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SRGGB10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SRGGB10,
		.mipicode	= 0x2b,
	},
};

/*
 * cio2_find_format - lookup color format by fourcc and/or media bus code
 * @pixelformat: fourcc to match, ignored if null
 * @mbus_code: media bus code to match, ignored if null
 */
static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
						    const u32 *mbus_code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); i++) {
		if (pixelformat && *pixelformat != formats[i].fourcc)
			continue;
		if (mbus_code && *mbus_code != formats[i].mbus_code)
			continue;

		return &formats[i];
	}

	return NULL;
}

static inline u32 cio2_bytesperline(const unsigned int width)
{
	/*
	 * 64 bytes for every 50 pixels, the line length
	 * in bytes is a multiple of 64 (line end alignment).
	 */
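	/*
	 * Illustrative example (not in the original source): at the maximum
	 * supported line width of 4224 pixels this evaluates to
	 * DIV_ROUND_UP(4224, 50) * 64 = 85 * 64 = 5440 bytes per line.
	 */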
	return DIV_ROUND_UP(width, 50) * 64;
}

/**************** FBPT operations ****************/

static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
{
	if (cio2->dummy_lop) {
		dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
				  cio2->dummy_lop, cio2->dummy_lop_bus_addr);
		cio2->dummy_lop = NULL;
	}
	if (cio2->dummy_page) {
		dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
				  cio2->dummy_page, cio2->dummy_page_bus_addr);
		cio2->dummy_page = NULL;
	}
}

static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
{
	unsigned int i;

	cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev,
					      CIO2_PAGE_SIZE,
					      &cio2->dummy_page_bus_addr,
					      GFP_KERNEL);
	cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev,
					     CIO2_PAGE_SIZE,
					     &cio2->dummy_lop_bus_addr,
					     GFP_KERNEL);
	if (!cio2->dummy_page || !cio2->dummy_lop) {
		cio2_fbpt_exit_dummy(cio2);
		return -ENOMEM;
	}
	/*
	 * A List of Pointers (LOP) contains 1024 x 32-bit pointers to a 4KB
	 * page each. Initialize each entry to the dummy_page bus base address.
	 */
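	/*
	 * Derived from the sizes above (for illustration): one LOP page holds
	 * CIO2_PAGE_SIZE / sizeof(u32) = 1024 entries, so a single LOP maps
	 * 1024 * 4 KiB = 4 MiB of buffer memory.
	 */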
	for (i = 0; i < CIO2_PAGE_SIZE / sizeof(*cio2->dummy_lop); i++)
		cio2->dummy_lop[i] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;

	return 0;
}

static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
				   struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
{
	/*
	 * The CPU first initializes some fields in fbpt, then sets
	 * the VALID bit, this barrier is to ensure that the DMA (device)
	 * does not see the VALID bit enabled before other fields are
	 * initialized; otherwise it could lead to havoc.
	 */
	dma_wmb();

	/*
	 * Request interrupts for start and completion.
	 * The Valid bit is applicable only to the 1st entry.
	 */
	entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
		CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
}

/* Initialize fbpt entries to point to a dummy frame */
static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
				       struct cio2_fbpt_entry
				       entry[CIO2_MAX_LOPS])
{
	unsigned int i;

	entry[0].first_entry.first_page_offset = 0;
	entry[1].second_entry.num_of_pages =
		CIO2_PAGE_SIZE / sizeof(u32) * CIO2_MAX_LOPS;
	entry[1].second_entry.last_page_available_bytes = CIO2_PAGE_SIZE - 1;

	for (i = 0; i < CIO2_MAX_LOPS; i++)
		entry[i].lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;

	cio2_fbpt_entry_enable(cio2, entry);
}

/* Initialize fbpt entries to point to a given buffer */
static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
				     struct cio2_buffer *b,
				     struct cio2_fbpt_entry
				     entry[CIO2_MAX_LOPS])
{
	struct vb2_buffer *vb = &b->vbb.vb2_buf;
	unsigned int length = vb->planes[0].length;
	int remaining, i;

	entry[0].first_entry.first_page_offset = b->offset;
	remaining = length + entry[0].first_entry.first_page_offset;
	entry[1].second_entry.num_of_pages =
		DIV_ROUND_UP(remaining, CIO2_PAGE_SIZE);
	/*
	 * last_page_available_bytes has the offset of the last byte in the
	 * last page which is still accessible by DMA. DMA cannot access
	 * beyond this point. Valid range for this is from 0 to 4095.
	 * 0 indicates the 1st byte in the page is DMA accessible.
	 * 4095 (CIO2_PAGE_SIZE - 1) means every single byte in the last page
	 * is available for DMA transfer.
	 */
	entry[1].second_entry.last_page_available_bytes =
			(remaining & ~PAGE_MASK) ?
				(remaining & ~PAGE_MASK) - 1 :
				CIO2_PAGE_SIZE - 1;
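	/*
	 * Worked example (illustrative): with a zero page offset and
	 * remaining = 8292 bytes, num_of_pages = 3 and
	 * 8292 & ~PAGE_MASK = 100, so the field is set to 99, i.e. bytes
	 * 0..99 of the third page are DMA-accessible.
	 */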
	/* Fill FBPT */
	remaining = length;
	i = 0;
	while (remaining > 0) {
		entry->lop_page_addr = b->lop_bus_addr[i] >> PAGE_SHIFT;
		remaining -= CIO2_PAGE_SIZE / sizeof(u32) * CIO2_PAGE_SIZE;
		entry++;
		i++;
	}

	/*
	 * The first unused FBPT entry must still point to a valid LOP
	 */
	entry->lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;

	cio2_fbpt_entry_enable(cio2, entry);
}

static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	struct device *dev = &cio2->pci_dev->dev;

	q->fbpt = dma_zalloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
				      GFP_KERNEL);
	if (!q->fbpt)
		return -ENOMEM;

	return 0;
}

static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
{
	dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
}

/**************** CSI2 hardware setup ****************/

/*
 * The CSI2 receiver has several parameters affecting
 * the receiver timings. These depend on the MIPI bus frequency
 * F in Hz (sensor transmitter rate) as follows:
 *     register value = (A/1e9 + B * UI) / COUNT_ACC
 * where
 *     UI = 1 / (2 * F) in seconds
 *     COUNT_ACC = counter accuracy in seconds
 *     For IPU3 COUNT_ACC = 0.0625
 *
 * A and B are coefficients from the table below,
 * depending on whether the register minimum or maximum value is
 * calculated.
 *                                     Minimum     Maximum
 * Clock lane                          A     B     A     B
 * reg_rx_csi_dly_cnt_termen_clane     0     0    38     0
 * reg_rx_csi_dly_cnt_settle_clane    95    -8   300   -16
 * Data lanes
 * reg_rx_csi_dly_cnt_termen_dlane0    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane0   85    -2   145    -6
 * reg_rx_csi_dly_cnt_termen_dlane1    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane1   85    -2   145    -6
 * reg_rx_csi_dly_cnt_termen_dlane2    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane2   85    -2   145    -6
 * reg_rx_csi_dly_cnt_termen_dlane3    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane3   85    -2   145    -6
 *
 * We use the minimum values of both A and B.
 */
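
/*
 * Worked example (illustrative, not part of the original comment): for a
 * 400 MHz link frequency, UI = 1 / (2 * 4e8) s = 1.25 ns. The clock lane
 * settle minimum (A = 95, B = -8) then evaluates in cio2_rx_timing() as
 * accinv * A + accinv * B * uiinv / freq = 16 * 95 - 16 * 8 * 1.25 = 1360.
 */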

/*
 * shift for keeping the value range suitable for 32-bit integer arithmetic
 */
#define LIMIT_SHIFT 8

static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
{
	const u32 accinv = 16; /* inverse of the counter resolution */
	const u32 uiinv = 500000000; /* 1e9 / 2 */
	s32 r;

	freq >>= LIMIT_SHIFT;

	if (WARN_ON(freq <= 0 || freq > S32_MAX))
		return def;
	/*
	 * b could be 0, -2 or -8, so |accinv * b| is always
	 * less than (1 << ds) and thus |r| < 500000000.
	 */
	r = accinv * b * (uiinv >> LIMIT_SHIFT);
	r = r / (s32)freq;
	/* max value of a is 95 */
	r += accinv * a;

	return r;
}

/* Calculate the delay value for termination enable of clock lane HS Rx */
static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
				 struct cio2_csi2_timing *timing)
{
	struct device *dev = &cio2->pci_dev->dev;
	struct v4l2_querymenu qm = {.id = V4L2_CID_LINK_FREQ, };
	struct v4l2_ctrl *link_freq;
	s64 freq;
	int r;

	if (!q->sensor)
		return -ENODEV;

	link_freq = v4l2_ctrl_find(q->sensor->ctrl_handler, V4L2_CID_LINK_FREQ);
	if (!link_freq) {
		dev_err(dev, "failed to find LINK_FREQ\n");
		return -EPIPE;
	}

	qm.index = v4l2_ctrl_g_ctrl(link_freq);
	r = v4l2_querymenu(q->sensor->ctrl_handler, &qm);
	if (r) {
		dev_err(dev, "failed to get menu item\n");
		return r;
	}

	if (!qm.value) {
		dev_err(dev, "error invalid link_freq\n");
		return -EINVAL;
	}
	freq = qm.value;

	timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
					    CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
	timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
					    CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
	timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
	timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);

	dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
	dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
	dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
	dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);

	return 0;
}

static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	static const int NUM_VCS = 4;
	static const int SID;	/* Stream id */
	static const int ENTRY;
	static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
						   CIO2_FBPT_SUBENTRY_UNIT);
	const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
	const struct ipu3_cio2_fmt *fmt;
	void __iomem *const base = cio2->base;
	u8 lanes, csi2bus = q->csi2.port;
	u8 sensor_vc = SENSOR_VIR_CH_DFLT;
	struct cio2_csi2_timing timing;
	int i, r;

	fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
	if (!fmt)
		return -EINVAL;

	lanes = q->csi2.lanes;

	r = cio2_csi2_calc_timing(cio2, q, &timing);
	if (r)
		return r;

	writel(timing.clk_termen, q->csi_rx_base +
		CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
	writel(timing.clk_settle, q->csi_rx_base +
		CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));

	for (i = 0; i < lanes; i++) {
		writel(timing.dat_termen, q->csi_rx_base +
			CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
		writel(timing.dat_settle, q->csi_rx_base +
			CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
	}

	writel(CIO2_PBM_WMCTRL1_MIN_2CK |
	       CIO2_PBM_WMCTRL1_MID1_2CK |
	       CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
	writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
	       CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
	       CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
	writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
	       CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
	       CIO2_PBM_ARB_CTRL_LE_EN |
	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
	       base + CIO2_REG_PBM_ARB_CTRL);
	writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
	writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);

	writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
	writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);

	/* Configure MIPI backend */
	for (i = 0; i < NUM_VCS; i++)
		writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));

	/* There are 16 short packet LUT entries */
	for (i = 0; i < 16; i++)
		writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
		       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
	writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
	       q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);

	writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
	writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);

	writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
	       CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
	       base + CIO2_REG_INT_EN);

	writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
	       << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
	       base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
	writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
	       sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
	       fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
	       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
	writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));

	writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
	writel(CIO2_CGC_PRIM_TGE |
	       CIO2_CGC_SIDE_TGE |
	       CIO2_CGC_XOSC_TGE |
	       CIO2_CGC_D3I3_TGE |
	       CIO2_CGC_CSI2_INTERFRAME_TGE |
	       CIO2_CGC_CSI2_PORT_DCGE |
	       CIO2_CGC_SIDE_DCGE |
	       CIO2_CGC_PRIM_DCGE |
	       CIO2_CGC_ROSC_DCGE |
	       CIO2_CGC_XOSC_DCGE |
	       CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
	       CIO2_CGC_CSI_CLKGATE_HOLDOFF
	       << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
	writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
	writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
	       CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
	       CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
	       CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
	       base + CIO2_REG_LTRVAL01);
	writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
	       CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
	       CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
	       CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
	       base + CIO2_REG_LTRVAL23);

	for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
		writel(0, base + CIO2_REG_CDMABA(i));
		writel(0, base + CIO2_REG_CDMAC0(i));
		writel(0, base + CIO2_REG_CDMAC1(i));
	}

	/* Enable DMA */
	writel(q->fbpt_bus_addr >> PAGE_SHIFT,
	       base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));

	writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
	       FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
	       CIO2_CDMAC0_DMA_INTR_ON_FE |
	       CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
	       CIO2_CDMAC0_DMA_EN |
	       CIO2_CDMAC0_DMA_INTR_ON_FS |
	       CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));

	writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
	       base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));

	writel(0, base + CIO2_REG_PBM_FOPN_ABORT);

	writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
	       CIO2_PXM_FRF_CFG_MSK_ECC_RE |
	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
	       base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));

	/* Clear interrupts */
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
	writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
	writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
	writel(~0, base + CIO2_REG_INT_STS);

	/* Enable devices, starting from the last device in the pipe */
	writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
	writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);

	return 0;
}

static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
	void __iomem *base = cio2->base;
	unsigned int i, maxloops = 1000;

	/* Disable CSI receiver and MIPI backend devices */
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);

	/* Halt DMA */
	writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
	do {
		if (readl(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN)) &
		    CIO2_CDMAC0_DMA_HALTED)
			break;
		usleep_range(1000, 2000);
	} while (--maxloops);
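	/* 1000 polls of 1000-2000 us each bound the wait to roughly 1-2 s */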
	if (!maxloops)
		dev_err(&cio2->pci_dev->dev,
			"DMA %i can not be halted\n", CIO2_DMA_CHAN);

	for (i = 0; i < CIO2_NUM_PORTS; i++) {
		writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
		       CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
		writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
		       CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
	}
}

static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
{
	struct device *dev = &cio2->pci_dev->dev;
	struct cio2_queue *q = cio2->cur_queue;
	int buffers_found = 0;
	u64 ns = ktime_get_ns();

	if (dma_chan >= CIO2_QUEUES) {
		dev_err(dev, "bad DMA channel %i\n", dma_chan);
		return;
	}

	/* Find out which buffer(s) are ready */
	do {
		struct cio2_fbpt_entry *const entry =
			&q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
		struct cio2_buffer *b;

		if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID)
			break;

		b = q->bufs[q->bufs_first];
		if (b) {
			unsigned int bytes = entry[1].second_entry.num_of_bytes;

			q->bufs[q->bufs_first] = NULL;
			atomic_dec(&q->bufs_queued);
			dev_dbg(&cio2->pci_dev->dev,
				"buffer %i done\n", b->vbb.vb2_buf.index);

			b->vbb.vb2_buf.timestamp = ns;
			b->vbb.field = V4L2_FIELD_NONE;
			b->vbb.sequence = atomic_read(&q->frame_sequence);
			if (b->vbb.vb2_buf.planes[0].length != bytes)
				dev_warn(dev, "buffer length is %d received %d\n",
					 b->vbb.vb2_buf.planes[0].length,
					 bytes);
			vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
		}
		atomic_inc(&q->frame_sequence);
		cio2_fbpt_entry_init_dummy(cio2, entry);
		q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
		buffers_found++;
	} while (1);

	if (buffers_found == 0)
		dev_warn(&cio2->pci_dev->dev,
			 "no ready buffers found on DMA channel %u\n",
			 dma_chan);
}

static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
{
	/*
	 * For the user space camera control algorithms it is essential
	 * to know when the reception of a frame has begun. That's often
	 * the best timing information to get from the hardware.
	 */
	struct v4l2_event event = {
		.type = V4L2_EVENT_FRAME_SYNC,
		.u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
	};

	v4l2_event_queue(q->subdev.devnode, &event);
}

static const char *const cio2_irq_errs[] = {
	"single packet header error corrected",
	"multiple packet header errors detected",
	"payload checksum (CRC) error",
	"fifo overflow",
	"reserved short packet data type detected",
	"reserved long packet data type detected",
	"incomplete long packet detected",
	"frame sync error",
	"line sync error",
	"DPHY start of transmission error",
	"DPHY synchronization error",
	"escape mode error",
	"escape mode trigger event",
	"escape mode ultra-low power state for data lane(s)",
	"escape mode ultra-low power state exit for clock lane",
	"inter-frame short packet discarded",
	"inter-frame long packet discarded",
	"non-matching Long Packet stalled",
};

static const char *const cio2_port_errs[] = {
	"ECC recoverable",
	"DPHY not recoverable",
	"ECC not recoverable",
	"CRC error",
	"INTERFRAMEDATA",
	"PKT2SHORT",
	"PKT2LONG",
};

static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
{
	void __iomem *const base = cio2->base;
	struct device *dev = &cio2->pci_dev->dev;

	if (int_status & CIO2_INT_IOOE) {
		/*
		 * Interrupt on Output Error:
		 * 1) SRAM is full and FS received, or
		 * 2) An invalid bit detected by DMA.
		 */
		u32 oe_status, oe_clear;

		oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
		oe_status = oe_clear;

		if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
			dev_err(dev, "DMA output error: 0x%x\n",
				(oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
				>> CIO2_INT_EXT_OE_DMAOE_SHIFT);
			oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
		}
		if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
			dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
				(oe_status & CIO2_INT_EXT_OE_OES_MASK)
				>> CIO2_INT_EXT_OE_OES_SHIFT);
			oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
		}
		writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
		if (oe_status)
			dev_warn(dev, "unknown interrupt 0x%x on OE\n",
				 oe_status);
		int_status &= ~CIO2_INT_IOOE;
	}

	if (int_status & CIO2_INT_IOC_MASK) {
		/* DMA IO done -- frame ready */
		u32 clr = 0;
		unsigned int d;

		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
			if (int_status & CIO2_INT_IOC(d)) {
				clr |= CIO2_INT_IOC(d);
				cio2_buffer_done(cio2, d);
			}
		int_status &= ~clr;
	}

	if (int_status & CIO2_INT_IOS_IOLN_MASK) {
		/* DMA IO starts or reached specified line */
		u32 clr = 0;
		unsigned int d;

		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
			if (int_status & CIO2_INT_IOS_IOLN(d)) {
				clr |= CIO2_INT_IOS_IOLN(d);
				if (d == CIO2_DMA_CHAN)
					cio2_queue_event_sof(cio2,
							     cio2->cur_queue);
			}
		int_status &= ~clr;
	}

	if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
		/* CSI2 receiver (error) interrupt */
		u32 ie_status, ie_clear;
		unsigned int port;

		ie_clear = readl(base + CIO2_REG_INT_STS_EXT_IE);
		ie_status = ie_clear;

		for (port = 0; port < CIO2_NUM_PORTS; port++) {
			u32 port_status = (ie_status >> (port * 8)) & 0xff;
			u32 err_mask = BIT_MASK(ARRAY_SIZE(cio2_port_errs)) - 1;
			void __iomem *const csi_rx_base =
				base + CIO2_REG_PIPE_BASE(port);
			unsigned int i;

			while (port_status & err_mask) {
				i = ffs(port_status) - 1;
				dev_err(dev, "port %i error %s\n",
					port, cio2_port_errs[i]);
				ie_status &= ~BIT(port * 8 + i);
				port_status &= ~BIT(i);
			}

			if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
				u32 csi2_status, csi2_clear;

				csi2_status = readl(csi_rx_base +
						CIO2_REG_IRQCTRL_STATUS);
				csi2_clear = csi2_status;
				err_mask =
					BIT_MASK(ARRAY_SIZE(cio2_irq_errs)) - 1;

				while (csi2_status & err_mask) {
					i = ffs(csi2_status) - 1;
					dev_err(dev,
						"CSI-2 receiver port %i: %s\n",
						port, cio2_irq_errs[i]);
					csi2_status &= ~BIT(i);
				}

				writel(csi2_clear,
				       csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
				if (csi2_status)
					dev_warn(dev,
						 "unknown CSI2 error 0x%x on port %i\n",
						 csi2_status, port);

				ie_status &= ~CIO2_INT_EXT_IE_IRQ(port);
			}
		}

		writel(ie_clear, base + CIO2_REG_INT_STS_EXT_IE);
		if (ie_status)
			dev_warn(dev, "unknown interrupt 0x%x on IE\n",
				 ie_status);

		int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
	}

	if (int_status)
		dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
}

static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
{
	struct cio2_device *cio2 = cio2_ptr;
	void __iomem *const base = cio2->base;
	struct device *dev = &cio2->pci_dev->dev;
	u32 int_status;

	int_status = readl(base + CIO2_REG_INT_STS);
	dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
	if (!int_status)
		return IRQ_NONE;

	do {
		writel(int_status, base + CIO2_REG_INT_STS);
		cio2_irq_handle_once(cio2, int_status);
		int_status = readl(base + CIO2_REG_INT_STS);
		if (int_status)
			dev_dbg(dev, "pending status 0x%x\n", int_status);
	} while (int_status);

	return IRQ_HANDLED;
}

/**************** Videobuf2 interface ****************/

static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
					enum vb2_buffer_state state)
{
	unsigned int i;

	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
		if (q->bufs[i]) {
			atomic_dec(&q->bufs_queued);
			vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
					state);
		}
	}
}

static int cio2_vb2_queue_setup(struct vb2_queue *vq,
				unsigned int *num_buffers,
				unsigned int *num_planes,
				unsigned int sizes[],
				struct device *alloc_devs[])
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
	unsigned int i;

	*num_planes = q->format.num_planes;

	for (i = 0; i < *num_planes; ++i) {
		sizes[i] = q->format.plane_fmt[i].sizeimage;
		alloc_devs[i] = &cio2->pci_dev->dev;
	}

	*num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);

	/* Initialize buffer queue */
	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
		q->bufs[i] = NULL;
		cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
	}
	atomic_set(&q->bufs_queued, 0);
	q->bufs_first = 0;
	q->bufs_next = 0;

	return 0;
}

/* Called after each buffer is allocated */
static int cio2_vb2_buf_init(struct vb2_buffer *vb)
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
	struct device *dev = &cio2->pci_dev->dev;
	struct cio2_buffer *b =
		container_of(vb, struct cio2_buffer, vbb.vb2_buf);
	static const unsigned int entries_per_page =
		CIO2_PAGE_SIZE / sizeof(u32);
	unsigned int pages = DIV_ROUND_UP(vb->planes[0].length, CIO2_PAGE_SIZE);
	unsigned int lops = DIV_ROUND_UP(pages + 1, entries_per_page);
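	/*
	 * The "+ 1" above makes room for the trailing dummy-page entry
	 * stored right after the last buffer page below.
	 */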
	struct sg_table *sg;
	struct sg_page_iter sg_iter;
	int i, j;

	if (lops <= 0 || lops > CIO2_MAX_LOPS) {
		dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
			vb->planes[0].length);
		return -ENOSPC;		/* Should never happen */
	}

	memset(b->lop, 0, sizeof(b->lop));
	/* Allocate LOP table */
	for (i = 0; i < lops; i++) {
		b->lop[i] = dma_alloc_coherent(dev, CIO2_PAGE_SIZE,
					       &b->lop_bus_addr[i], GFP_KERNEL);
		if (!b->lop[i])
			goto fail;
	}

	/* Fill LOP */
	sg = vb2_dma_sg_plane_desc(vb, 0);
	if (!sg)
		goto fail;	/* free the LOP pages allocated above */

	if (sg->nents && sg->sgl)
		b->offset = sg->sgl->offset;

	i = j = 0;
	for_each_sg_page(sg->sgl, &sg_iter, sg->nents, 0) {
		if (!pages--)
			break;
		b->lop[i][j] = sg_page_iter_dma_address(&sg_iter) >> PAGE_SHIFT;
		j++;
		if (j == entries_per_page) {
			i++;
			j = 0;
		}
	}

	b->lop[i][j] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;
	return 0;
fail:
	for (i--; i >= 0; i--)
		dma_free_coherent(dev, CIO2_PAGE_SIZE,
				  b->lop[i], b->lop_bus_addr[i]);
	return -ENOMEM;
}

/* Transfer buffer ownership to cio2 */
static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
	struct cio2_queue *q =
		container_of(vb->vb2_queue, struct cio2_queue, vbq);
	struct cio2_buffer *b =
		container_of(vb, struct cio2_buffer, vbb.vb2_buf);
	struct cio2_fbpt_entry *entry;
	unsigned long flags;
	unsigned int i, j, next = q->bufs_next;
	int bufs_queued = atomic_inc_return(&q->bufs_queued);
	u32 fbpt_rp;

	dev_dbg(&cio2->pci_dev->dev, "queue buffer %d\n", vb->index);

	/*
	 * This code queues the buffer to the CIO2 DMA engine, which starts
	 * running once streaming has started. It is possible that this code
	 * gets pre-empted due to increased CPU load. Upon this, the driver
	 * does not get an opportunity to queue new buffers to the CIO2 DMA
	 * engine. When the DMA engine encounters an FBPT entry without the
	 * VALID bit set, the DMA engine halts, which requires a restart of
	 * the DMA engine and sensor, to continue streaming.
	 * This is not desired and is highly unlikely given that there are
	 * 32 FBPT entries that the DMA engine needs to process, to run into
	 * an FBPT entry, without the VALID bit set. We try to mitigate this
	 * by disabling interrupts for the duration of this queueing.
	 */
	local_irq_save(flags);

	fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
		   >> CIO2_CDMARI_FBPT_RP_SHIFT)
		   & CIO2_CDMARI_FBPT_RP_MASK;

	/*
	 * fbpt_rp is the fbpt entry that the dma is currently working
	 * on, but since it could jump to next entry at any time,
	 * assume that we might already be there.
	 */
	fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;

	if (bufs_queued <= 1 || fbpt_rp == next)
		/* Buffers were drained */
		next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;

	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
		/*
		 * We have allocated CIO2_MAX_BUFFERS circularly for the
		 * hw, the user has requested N buffer queue. The driver
		 * ensures N <= CIO2_MAX_BUFFERS and guarantees that whenever
		 * user queues a buffer, there necessarily is a free buffer.
		 */
		if (!q->bufs[next]) {
			q->bufs[next] = b;
			entry = &q->fbpt[next * CIO2_MAX_LOPS];
			cio2_fbpt_entry_init_buf(cio2, b, entry);
			local_irq_restore(flags);
			q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
			for (j = 0; j < vb->num_planes; j++)
				vb2_set_plane_payload(vb, j,
					q->format.plane_fmt[j].sizeimage);
			return;
		}

		dev_dbg(&cio2->pci_dev->dev, "entry %i was full!\n", next);
		next = (next + 1) % CIO2_MAX_BUFFERS;
	}

	local_irq_restore(flags);
	dev_err(&cio2->pci_dev->dev, "error: all cio2 entries were full!\n");
	atomic_dec(&q->bufs_queued);
	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}

/* Called when each buffer is freed */
static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
	struct cio2_buffer *b =
		container_of(vb, struct cio2_buffer, vbb.vb2_buf);
	unsigned int i;

	/* Free LOP table */
	for (i = 0; i < CIO2_MAX_LOPS; i++) {
		if (b->lop[i])
			dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
					  b->lop[i], b->lop_bus_addr[i]);
	}
}

static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
	int r;

	cio2->cur_queue = q;
	atomic_set(&q->frame_sequence, 0);

	r = pm_runtime_get_sync(&cio2->pci_dev->dev);
	if (r < 0) {
		dev_info(&cio2->pci_dev->dev, "failed to set power %d\n", r);
		pm_runtime_put_noidle(&cio2->pci_dev->dev);
		return r;
	}

	r = media_pipeline_start(&q->vdev.entity, &q->pipe);
	if (r)
		goto fail_pipeline;

	r = cio2_hw_init(cio2, q);
	if (r)
		goto fail_hw;

	/* Start streaming on sensor */
	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
	if (r)
		goto fail_csi2_subdev;

	cio2->streaming = true;

	return 0;

fail_csi2_subdev:
	cio2_hw_exit(cio2, q);
fail_hw:
	media_pipeline_stop(&q->vdev.entity);
fail_pipeline:
	dev_dbg(&cio2->pci_dev->dev, "failed to start streaming (%d)\n", r);
	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
	pm_runtime_put(&cio2->pci_dev->dev);

	return r;
}

static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
{
	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
	struct cio2_device *cio2 = vb2_get_drv_priv(vq);

	if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
		dev_err(&cio2->pci_dev->dev,
			"failed to stop sensor streaming\n");

	cio2_hw_exit(cio2, q);
	synchronize_irq(cio2->pci_dev->irq);
	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
	media_pipeline_stop(&q->vdev.entity);
	pm_runtime_put(&cio2->pci_dev->dev);
	cio2->streaming = false;
}

static const struct vb2_ops cio2_vb2_ops = {
	.buf_init = cio2_vb2_buf_init,
	.buf_queue = cio2_vb2_buf_queue,
	.buf_cleanup = cio2_vb2_buf_cleanup,
	.queue_setup = cio2_vb2_queue_setup,
	.start_streaming = cio2_vb2_start_streaming,
	.stop_streaming = cio2_vb2_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};

/**************** V4L2 interface ****************/

static int cio2_v4l2_querycap(struct file *file, void *fh,
			      struct v4l2_capability *cap)
{
	struct cio2_device *cio2 = video_drvdata(file);

	strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
	strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info),
		 "PCI:%s", pci_name(cio2->pci_dev));

	return 0;
}

static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
			      struct v4l2_fmtdesc *f)
{
	if (f->index >= ARRAY_SIZE(formats))
		return -EINVAL;

	f->pixelformat = formats[f->index].fourcc;

	return 0;
}

/* The format is validated in cio2_video_link_validate() */
static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	struct cio2_queue *q = file_to_cio2_queue(file);

	f->fmt.pix_mp = q->format;

	return 0;
}

static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	const struct ipu3_cio2_fmt *fmt;
	struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;

	fmt = cio2_find_format(&mpix->pixelformat, NULL);
	if (!fmt)
		fmt = &formats[0];

	/* Only supports up to 4224x3136 */
	if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
		mpix->width = CIO2_IMAGE_MAX_WIDTH;
	if (mpix->height > CIO2_IMAGE_MAX_LENGTH)
		mpix->height = CIO2_IMAGE_MAX_LENGTH;

	mpix->num_planes = 1;
	mpix->pixelformat = fmt->fourcc;
	mpix->colorspace = V4L2_COLORSPACE_RAW;
	mpix->field = V4L2_FIELD_NONE;
	memset(mpix->reserved, 0, sizeof(mpix->reserved));
	mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
	mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
				       mpix->height;
	memset(mpix->plane_fmt[0].reserved, 0,
	       sizeof(mpix->plane_fmt[0].reserved));

	/* use default */
	mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
	mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
	mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;

	return 0;
}

static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	struct cio2_queue *q = file_to_cio2_queue(file);

	cio2_v4l2_try_fmt(file, fh, f);
	q->format = f->fmt.pix_mp;

	return 0;
}

static int
cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{
	if (input->index > 0)
		return -EINVAL;

	strscpy(input->name, "camera", sizeof(input->name));
	input->type = V4L2_INPUT_TYPE_CAMERA;

	return 0;
}

static int
cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
{
	*input = 0;

	return 0;
}

static int
cio2_video_s_input(struct file *file, void *fh, unsigned int input)
{
	return input == 0 ? 0 : -EINVAL;
}

static const struct v4l2_file_operations cio2_v4l2_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
};

static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
	.vidioc_querycap = cio2_v4l2_querycap,
	.vidioc_enum_fmt_vid_cap_mplane = cio2_v4l2_enum_fmt,
	.vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
	.vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
	.vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_enum_input = cio2_video_enum_input,
	.vidioc_g_input = cio2_video_g_input,
	.vidioc_s_input = cio2_video_s_input,
};

static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
				       struct v4l2_fh *fh,
				       struct v4l2_event_subscription *sub)
{
	if (sub->type != V4L2_EVENT_FRAME_SYNC)
		return -EINVAL;

	/* Line number. For now only zero accepted. */
	if (sub->id != 0)
		return -EINVAL;

	return v4l2_event_subscribe(fh, sub, 0, NULL);
}

static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	struct v4l2_mbus_framefmt *format;
	const struct v4l2_mbus_framefmt fmt_default = {
		.width = 1936,
		.height = 1096,
		.code = formats[0].mbus_code,
		.field = V4L2_FIELD_NONE,
		.colorspace = V4L2_COLORSPACE_RAW,
		.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
		.quantization = V4L2_QUANTIZATION_DEFAULT,
		.xfer_func = V4L2_XFER_FUNC_DEFAULT,
	};

	/* Initialize try_fmt */
	format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SINK);
	*format = fmt_default;

	/* same as sink */
	format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SOURCE);
	*format = fmt_default;

	return 0;
}

/*
 * cio2_subdev_get_fmt - Handle get format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @cfg: V4L2 subdev pad config
 * @fmt: pointer to v4l2 subdev format structure
 * return zero on success or a negative error code otherwise
 */
static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
			       struct v4l2_subdev_pad_config *cfg,
			       struct v4l2_subdev_format *fmt)
{
	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
	struct v4l2_subdev_format format;
	int ret;

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
		fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
		return 0;
	}

	if (fmt->pad == CIO2_PAD_SINK) {
		format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		ret = v4l2_subdev_call(sd, pad, get_fmt, NULL,
				       &format);

		if (ret)
			return ret;
		/* update colorspace etc */
		q->subdev_fmt.colorspace = format.format.colorspace;
		q->subdev_fmt.ycbcr_enc = format.format.ycbcr_enc;
		q->subdev_fmt.quantization = format.format.quantization;
		q->subdev_fmt.xfer_func = format.format.xfer_func;
	}

	fmt->format = q->subdev_fmt;

	return 0;
}

/*
 * cio2_subdev_set_fmt - Handle set format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @cfg: V4L2 subdev pad config
 * @fmt: pointer to v4l2 subdev format structure
 * return zero on success or a negative error code otherwise
 */
static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
			       struct v4l2_subdev_pad_config *cfg,
			       struct v4l2_subdev_format *fmt)
{
	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);

	/*
	 * Only allow setting sink pad format;
	 * source always propagates from sink
	 */
	if (fmt->pad == CIO2_PAD_SOURCE)
		return cio2_subdev_get_fmt(sd, cfg, fmt);

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
		*v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
	} else {
		/* It's the sink, allow changing frame size */
		q->subdev_fmt.width = fmt->format.width;
		q->subdev_fmt.height = fmt->format.height;
		q->subdev_fmt.code = fmt->format.code;
		fmt->format = q->subdev_fmt;
	}

	return 0;
}

static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
				      struct v4l2_subdev_pad_config *cfg,
				      struct v4l2_subdev_mbus_code_enum *code)
{
	if (code->index >= ARRAY_SIZE(formats))
		return -EINVAL;

	code->code = formats[code->index].mbus_code;
	return 0;
}

static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
						struct v4l2_subdev_format *fmt)
{
	if (is_media_entity_v4l2_subdev(pad->entity)) {
		struct v4l2_subdev *sd =
			media_entity_to_v4l2_subdev(pad->entity);

		fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
		fmt->pad = pad->index;
		return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
	}

	return -EINVAL;
}

static int cio2_video_link_validate(struct media_link *link)
{
	struct video_device *vd = container_of(link->sink->entity,
					       struct video_device, entity);
	struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
	struct cio2_device *cio2 = video_get_drvdata(vd);
	struct v4l2_subdev_format source_fmt;
	int ret;

	if (!media_entity_remote_pad(link->sink->entity->pads)) {
		dev_info(&cio2->pci_dev->dev,
			 "video node %s pad not connected\n", vd->name);
		return -ENOTCONN;
	}

	ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
	if (ret < 0)
		return 0;

	if (source_fmt.format.width != q->format.width ||
	    source_fmt.format.height != q->format.height) {
		dev_err(&cio2->pci_dev->dev,
			"Wrong width or height %ux%u (%ux%u expected)\n",
			q->format.width, q->format.height,
			source_fmt.format.width, source_fmt.format.height);
		return -EINVAL;
	}

	if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
		return -EINVAL;

	return 0;
}

static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
	.subscribe_event = cio2_subdev_subscribe_event,
	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};

static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
	.open = cio2_subdev_open,
};

static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
	.link_validate = v4l2_subdev_link_validate_default,
	.get_fmt = cio2_subdev_get_fmt,
	.set_fmt = cio2_subdev_set_fmt,
	.enum_mbus_code = cio2_subdev_enum_mbus_code,
};

static const struct v4l2_subdev_ops cio2_subdev_ops = {
	.core = &cio2_subdev_core_ops,
	.pad = &cio2_subdev_pad_ops,
};

/******* V4L2 sub-device asynchronous registration callbacks ***********/

struct sensor_async_subdev {
	struct v4l2_async_subdev asd;
	struct csi2_bus_info csi2;
};

/* The .bound() notifier callback when a match is found */
static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
			       struct v4l2_subdev *sd,
			       struct v4l2_async_subdev *asd)
{
	struct cio2_device *cio2 = container_of(notifier,
						struct cio2_device, notifier);
	struct sensor_async_subdev *s_asd = container_of(asd,
					struct sensor_async_subdev, asd);
	struct cio2_queue *q;

	if (cio2->queue[s_asd->csi2.port].sensor)
		return -EBUSY;

	q = &cio2->queue[s_asd->csi2.port];

	q->csi2 = s_asd->csi2;
	q->sensor = sd;
	q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);

	return 0;
}

/* The .unbind callback */
static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
				 struct v4l2_subdev *sd,
				 struct v4l2_async_subdev *asd)
{
	struct cio2_device *cio2 = container_of(notifier,
						struct cio2_device, notifier);
	struct sensor_async_subdev *s_asd = container_of(asd,
					struct sensor_async_subdev, asd);

	cio2->queue[s_asd->csi2.port].sensor = NULL;
}

/* .complete() is called after all subdevices have been located */
static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
{
	struct cio2_device *cio2 = container_of(notifier, struct cio2_device,
						notifier);
	struct sensor_async_subdev *s_asd;
	struct v4l2_async_subdev *asd;
	struct cio2_queue *q;
	unsigned int pad;
	int ret;

	list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) {
		s_asd = container_of(asd, struct sensor_async_subdev, asd);
		q = &cio2->queue[s_asd->csi2.port];

		for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
			if (q->sensor->entity.pads[pad].flags &
			    MEDIA_PAD_FL_SOURCE)
				break;

		if (pad == q->sensor->entity.num_pads) {
			dev_err(&cio2->pci_dev->dev,
				"failed to find src pad for %s\n",
				q->sensor->name);
			return -ENXIO;
		}

		ret = media_create_pad_link(
				&q->sensor->entity, pad,
				&q->subdev.entity, CIO2_PAD_SINK,
				0);
		if (ret) {
			dev_err(&cio2->pci_dev->dev,
				"failed to create link for %s\n",
				q->sensor->name);
			return ret;
		}
	}

	return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
}

static const struct v4l2_async_notifier_operations cio2_async_ops = {
	.bound = cio2_notifier_bound,
	.unbind = cio2_notifier_unbind,
	.complete = cio2_notifier_complete,
};
1477 | ||
1478 | static int cio2_fwnode_parse(struct device *dev, | |
1479 | struct v4l2_fwnode_endpoint *vep, | |
1480 | struct v4l2_async_subdev *asd) | |
1481 | { | |
1482 | struct sensor_async_subdev *s_asd = | |
1483 | container_of(asd, struct sensor_async_subdev, asd); | |
1484 | ||
2d95e7ed | 1485 | if (vep->bus_type != V4L2_MBUS_CSI2_DPHY) { |
c2a6a07a YZ |
1486 | dev_err(dev, "Only CSI2 bus type is currently supported\n"); |
1487 | return -EINVAL; | |
1488 | } | |
1489 | ||
1490 | s_asd->csi2.port = vep->base.port; | |
1491 | s_asd->csi2.lanes = vep->bus.mipi_csi2.num_data_lanes; | |
1492 | ||
1493 | return 0; | |
1494 | } | |
1495 | ||
1496 | static int cio2_notifier_init(struct cio2_device *cio2) | |
1497 | { | |
1498 | int ret; | |
1499 | ||
eae2aed1 SL |
1500 | v4l2_async_notifier_init(&cio2->notifier); |
1501 | ||
c2a6a07a YZ |
1502 | ret = v4l2_async_notifier_parse_fwnode_endpoints( |
1503 | &cio2->pci_dev->dev, &cio2->notifier, | |
1504 | sizeof(struct sensor_async_subdev), | |
1505 | cio2_fwnode_parse); | |
1506 | if (ret < 0) | |
1507 | return ret; | |
1508 | ||
d079f94c | 1509 | if (list_empty(&cio2->notifier.asd_list)) |
c2a6a07a YZ |
1510 | return -ENODEV; /* no endpoint */ |
1511 | ||
1512 | cio2->notifier.ops = &cio2_async_ops; | |
1513 | ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier); | |
1514 | if (ret) { | |
1515 | dev_err(&cio2->pci_dev->dev, | |
1516 | "failed to register async notifier : %d\n", ret); | |
1517 | v4l2_async_notifier_cleanup(&cio2->notifier); | |
1518 | } | |
1519 | ||
1520 | return ret; | |
1521 | } | |
1522 | ||
1523 | static void cio2_notifier_exit(struct cio2_device *cio2) | |
1524 | { | |
1525 | v4l2_async_notifier_unregister(&cio2->notifier); | |
1526 | v4l2_async_notifier_cleanup(&cio2->notifier); | |
1527 | } | |
1528 | ||
1529 | /**************** Queue initialization ****************/ | |
1530 | static const struct media_entity_operations cio2_media_ops = { | |
1531 | .link_validate = v4l2_subdev_link_validate, | |
1532 | }; | |
1533 | ||
1534 | static const struct media_entity_operations cio2_video_entity_ops = { | |
1535 | .link_validate = cio2_video_link_validate, | |
1536 | }; | |
1537 | ||
1538 | static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q) | |
1539 | { | |
1540 | static const u32 default_width = 1936; | |
1541 | static const u32 default_height = 1096; | |
1542 | const struct ipu3_cio2_fmt dflt_fmt = formats[0]; | |
1543 | ||
1544 | struct video_device *vdev = &q->vdev; | |
1545 | struct vb2_queue *vbq = &q->vbq; | |
1546 | struct v4l2_subdev *subdev = &q->subdev; | |
1547 | struct v4l2_mbus_framefmt *fmt; | |
1548 | int r; | |
1549 | ||
1550 | /* Initialize miscellaneous variables */ | |
1551 | mutex_init(&q->lock); | |
1552 | ||
1553 | /* Initialize formats to default values */ | |
1554 | fmt = &q->subdev_fmt; | |
1555 | fmt->width = default_width; | |
1556 | fmt->height = default_height; | |
1557 | fmt->code = dflt_fmt.mbus_code; | |
1558 | fmt->field = V4L2_FIELD_NONE; | |
1559 | ||
1560 | q->format.width = default_width; | |
1561 | q->format.height = default_height; | |
1562 | q->format.pixelformat = dflt_fmt.fourcc; | |
1563 | q->format.colorspace = V4L2_COLORSPACE_RAW; | |
1564 | q->format.field = V4L2_FIELD_NONE; | |
1565 | q->format.num_planes = 1; | |
1566 | q->format.plane_fmt[0].bytesperline = | |
1567 | cio2_bytesperline(q->format.width); | |
1568 | q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline * | |
1569 | q->format.height; | |
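/*
 * Worked example for the defaults above, using the "64 bytes per
 * 50 pixels" packing:
 *
 *   bytesperline = DIV_ROUND_UP(1936, 50) * 64 = 39 * 64 = 2496
 *   sizeimage    = 2496 * 1096 = 2735616 bytes
 */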
1570 | ||
1571 | /* Initialize fbpt */ | |
1572 | r = cio2_fbpt_init(cio2, q); | |
1573 | if (r) | |
1574 | goto fail_fbpt; | |
1575 | ||
1576 | /* Initialize media entities */ | |
1577 | q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK | | |
1578 | MEDIA_PAD_FL_MUST_CONNECT; | |
1579 | q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; | |
1580 | subdev->entity.ops = &cio2_media_ops; | |
1581 | subdev->internal_ops = &cio2_subdev_internal_ops; | |
1582 | r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads); | |
1583 | if (r) { | |
1584 | dev_err(&cio2->pci_dev->dev, | |
1585 | "failed initialize subdev media entity (%d)\n", r); | |
1586 | goto fail_subdev_media_entity; | |
1587 | } | |
1588 | ||
1589 | q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; | |
1590 | vdev->entity.ops = &cio2_video_entity_ops; | |
1591 | r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad); | |
1592 | if (r) { | |
1593 | dev_err(&cio2->pci_dev->dev, | |
1594 | "failed initialize videodev media entity (%d)\n", r); | |
1595 | goto fail_vdev_media_entity; | |
1596 | } | |
1597 | ||
1598 | /* Initialize subdev */ | |
1599 | v4l2_subdev_init(subdev, &cio2_subdev_ops); | |
1600 | subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS; | |
1601 | subdev->owner = THIS_MODULE; | |
1602 | snprintf(subdev->name, sizeof(subdev->name), | |
1603 | CIO2_ENTITY_NAME " %td", q - cio2->queue); | |
1604 | v4l2_set_subdevdata(subdev, cio2); | |
1605 | r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev); | |
1606 | if (r) { | |
1607 | dev_err(&cio2->pci_dev->dev, | |
1608 | "failed initialize subdev (%d)\n", r); | |
1609 | goto fail_subdev; | |
1610 | } | |
1611 | ||
1612 | /* Initialize vbq */ | |
1613 | vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; | |
1614 | vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF; | |
1615 | vbq->ops = &cio2_vb2_ops; | |
1616 | vbq->mem_ops = &vb2_dma_sg_memops; | |
1617 | vbq->buf_struct_size = sizeof(struct cio2_buffer); | |
1618 | vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; | |
1619 | vbq->min_buffers_needed = 1; | |
1620 | vbq->drv_priv = cio2; | |
1621 | vbq->lock = &q->lock; | |
1622 | r = vb2_queue_init(vbq); | |
1623 | if (r) { | |
1624 | dev_err(&cio2->pci_dev->dev, | |
1625 | "failed to initialize videobuf2 queue (%d)\n", r); | |
1626 | goto fail_vbq; | |
1627 | } | |
1628 | ||
1629 | /* Initialize vdev */ | |
1630 | snprintf(vdev->name, sizeof(vdev->name), | |
1631 | "%s %td", CIO2_NAME, q - cio2->queue); | |
1632 | vdev->release = video_device_release_empty; | |
1633 | vdev->fops = &cio2_v4l2_fops; | |
1634 | vdev->ioctl_ops = &cio2_v4l2_ioctl_ops; | |
1635 | vdev->lock = &cio2->lock; | |
1636 | vdev->v4l2_dev = &cio2->v4l2_dev; | |
1637 | vdev->queue = &q->vbq; | |
1638 | vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING; | |
1639 | video_set_drvdata(vdev, cio2); | |
1640 | r = video_register_device(vdev, VFL_TYPE_GRABBER, -1); | |
1641 | if (r) { | |
1642 | dev_err(&cio2->pci_dev->dev, | |
1643 | "failed to register video device (%d)\n", r); | |
1644 | goto fail_vdev; | |
1645 | } | |
1646 | ||
1647 | /* Create link from CIO2 subdev to output node */ | |
1648 | r = media_create_pad_link( | |
1649 | &subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0, | |
1650 | MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); | |
1651 | if (r) | |
1652 | goto fail_link; | |
1653 | ||
1654 | return 0; | |
1655 | ||
1656 | fail_link: | |
1657 | video_unregister_device(&q->vdev); | |
1658 | fail_vdev: | |
1659 | vb2_queue_release(vbq); | |
1660 | fail_vbq: | |
1661 | v4l2_device_unregister_subdev(subdev); | |
1662 | fail_subdev: | |
1663 | media_entity_cleanup(&vdev->entity); | |
1664 | fail_vdev_media_entity: | |
1665 | media_entity_cleanup(&subdev->entity); | |
1666 | fail_subdev_media_entity: | |
1667 | cio2_fbpt_exit(q, &cio2->pci_dev->dev); | |
1668 | fail_fbpt: | |
1669 | mutex_destroy(&q->lock); | |
1670 | ||
1671 | return r; | |
1672 | } | |
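/*
 * Not part of the driver: a minimal user-space sketch of how the video
 * node registered above could be exercised. The device path and buffer
 * count are assumptions for illustration.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int cio2_demo_open(const char *path)	/* e.g. "/dev/video0" */
{
	struct v4l2_requestbuffers req = {
		.count = 4,
		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
		.memory = V4L2_MEMORY_MMAP,
	};
	struct v4l2_format fmt = {
		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
	};
	int type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;

	/* Accept the default 1936x1096 packed raw format set above */
	if (ioctl(fd, VIDIOC_G_FMT, &fmt) < 0 ||
	    ioctl(fd, VIDIOC_S_FMT, &fmt) < 0 ||
	    ioctl(fd, VIDIOC_REQBUFS, &req) < 0 ||
	    ioctl(fd, VIDIOC_STREAMON, &type) < 0)
		return -1;

	/* Frames flow once buffers are queued with VIDIOC_QBUF */
	return fd;
}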
1673 | ||
1674 | static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q) | |
1675 | { | |
1676 | video_unregister_device(&q->vdev); | |
1677 | media_entity_cleanup(&q->vdev.entity); | |
1678 | vb2_queue_release(&q->vbq); | |
1679 | v4l2_device_unregister_subdev(&q->subdev); | |
1680 | media_entity_cleanup(&q->subdev.entity); | |
1681 | cio2_fbpt_exit(q, &cio2->pci_dev->dev); | |
1682 | mutex_destroy(&q->lock); | |
1683 | } | |
1684 | ||
1685 | static int cio2_queues_init(struct cio2_device *cio2) | |
1686 | { | |
1687 | int i, r; | |
1688 | ||
1689 | for (i = 0; i < CIO2_QUEUES; i++) { | |
1690 | r = cio2_queue_init(cio2, &cio2->queue[i]); | |
1691 | if (r) | |
1692 | break; | |
1693 | } | |
1694 | ||
1695 | if (i == CIO2_QUEUES) | |
1696 | return 0; | |
1697 | ||
1698 | for (i--; i >= 0; i--) | |
1699 | cio2_queue_exit(cio2, &cio2->queue[i]); | |
1700 | ||
1701 | return r; | |
1702 | } | |
1703 | ||
1704 | static void cio2_queues_exit(struct cio2_device *cio2) | |
1705 | { | |
1706 | unsigned int i; | |
1707 | ||
1708 | for (i = 0; i < CIO2_QUEUES; i++) | |
1709 | cio2_queue_exit(cio2, &cio2->queue[i]); | |
1710 | } | |
1711 | ||
1712 | /**************** PCI interface ****************/ | |
1713 | ||
1714 | static int cio2_pci_config_setup(struct pci_dev *dev) | |
1715 | { | |
1716 | u16 pci_command; | |
1717 | int r = pci_enable_msi(dev); | |
1718 | ||
1719 | if (r) { | |
1720 | dev_err(&dev->dev, "failed to enable MSI (%d)\n", r); | |
1721 | return r; | |
1722 | } | |
1723 | ||
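	/* Enable MMIO and bus mastering; mask legacy INTx now that MSI is used */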
1724 | pci_read_config_word(dev, PCI_COMMAND, &pci_command); | |
1725 | pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | | |
1726 | PCI_COMMAND_INTX_DISABLE; | |
1727 | pci_write_config_word(dev, PCI_COMMAND, pci_command); | |
1728 | ||
1729 | return 0; | |
1730 | } | |
1731 | ||
1732 | static int cio2_pci_probe(struct pci_dev *pci_dev, | |
1733 | const struct pci_device_id *id) | |
1734 | { | |
1735 | struct cio2_device *cio2; | |
1736 | void __iomem *const *iomap; | |
1737 | int r; | |
1738 | ||
1739 | cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL); | |
1740 | if (!cio2) | |
1741 | return -ENOMEM; | |
1742 | cio2->pci_dev = pci_dev; | |
1743 | ||
1744 | r = pcim_enable_device(pci_dev); | |
1745 | if (r) { | |
1746 | dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r); | |
1747 | return r; | |
1748 | } | |
1749 | ||
1750 | dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n", | |
1751 | pci_dev->device, pci_dev->revision); | |
1752 | ||
1753 | r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev)); | |
1754 | if (r) { | |
1755 | dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r); | |
1756 | return -ENODEV; | |
1757 | } | |
1758 | ||
1759 | iomap = pcim_iomap_table(pci_dev); | |
1760 | if (!iomap) { | |
1761 | dev_err(&pci_dev->dev, "failed to obtain iomap table\n"); | |
1762 | return -ENODEV; | |
1763 | } | |
1764 | ||
1765 | cio2->base = iomap[CIO2_PCI_BAR]; | |
1766 | ||
1767 | pci_set_drvdata(pci_dev, cio2); | |
1768 | ||
1769 | pci_set_master(pci_dev); | |
1770 | ||
1771 | r = pci_set_dma_mask(pci_dev, CIO2_DMA_MASK); | |
1772 | if (r) { | |
1773 | dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r); | |
1774 | return -ENODEV; | |
1775 | } | |
1776 | ||
1777 | r = cio2_pci_config_setup(pci_dev); | |
1778 | if (r) | |
1779 | return -ENODEV; | |
1780 | ||
1781 | r = cio2_fbpt_init_dummy(cio2); | |
1782 | if (r) | |
1783 | return r; | |
1784 | ||
1785 | mutex_init(&cio2->lock); | |
1786 | ||
1787 | cio2->media_dev.dev = &cio2->pci_dev->dev; | |
c0decac1 | 1788 | strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME, |
c2a6a07a YZ | 1789 | sizeof(cio2->media_dev.model)); |
1790 | snprintf(cio2->media_dev.bus_info, sizeof(cio2->media_dev.bus_info), | |
1791 | "PCI:%s", pci_name(cio2->pci_dev)); | |
1792 | cio2->media_dev.hw_revision = 0; | |
1793 | ||
1794 | media_device_init(&cio2->media_dev); | |
1795 | r = media_device_register(&cio2->media_dev); | |
1796 | if (r < 0) | |
1797 | goto fail_mutex_destroy; | |
1798 | ||
1799 | cio2->v4l2_dev.mdev = &cio2->media_dev; | |
1800 | r = v4l2_device_register(&pci_dev->dev, &cio2->v4l2_dev); | |
1801 | if (r) { | |
1802 | dev_err(&pci_dev->dev, | |
1803 | "failed to register V4L2 device (%d)\n", r); | |
1804 | goto fail_media_device_unregister; | |
1805 | } | |
1806 | ||
1807 | r = cio2_queues_init(cio2); | |
1808 | if (r) | |
1809 | goto fail_v4l2_device_unregister; | |
1810 | ||
1811 | /* Register notifier for the subdevices we care about */ | |
1812 | r = cio2_notifier_init(cio2); | |
1813 | if (r) | |
1814 | goto fail_cio2_queue_exit; | |
1815 | ||
1816 | r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq, | |
1817 | IRQF_SHARED, CIO2_NAME, cio2); | |
1818 | if (r) { | |
1819 | dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r); | |
1820 | goto fail; | |
1821 | } | |
1822 | ||
1823 | pm_runtime_put_noidle(&pci_dev->dev); | |
1824 | pm_runtime_allow(&pci_dev->dev); | |
1825 | ||
1826 | return 0; | |
1827 | ||
1828 | fail: | |
1829 | cio2_notifier_exit(cio2); | |
1830 | fail_cio2_queue_exit: | |
1831 | cio2_queues_exit(cio2); | |
1832 | fail_v4l2_device_unregister: | |
1833 | v4l2_device_unregister(&cio2->v4l2_dev); | |
1834 | fail_media_device_unregister: | |
1835 | media_device_unregister(&cio2->media_dev); | |
1836 | media_device_cleanup(&cio2->media_dev); | |
1837 | fail_mutex_destroy: | |
1838 | mutex_destroy(&cio2->lock); | |
1839 | cio2_fbpt_exit_dummy(cio2); | |
1840 | ||
1841 | return r; | |
1842 | } | |
1843 | ||
1844 | static void cio2_pci_remove(struct pci_dev *pci_dev) | |
1845 | { | |
1846 | struct cio2_device *cio2 = pci_get_drvdata(pci_dev); | |
c2a6a07a | 1847 | |
32388d6e | 1848 | media_device_unregister(&cio2->media_dev); |
c2a6a07a | 1849 | cio2_notifier_exit(cio2); |
4e26f692 | 1850 | cio2_queues_exit(cio2); |
32388d6e | 1851 | cio2_fbpt_exit_dummy(cio2); |
c2a6a07a | 1852 | v4l2_device_unregister(&cio2->v4l2_dev); |
c2a6a07a YZ | 1853 | media_device_cleanup(&cio2->media_dev); |
1854 | mutex_destroy(&cio2->lock); | |
1855 | } | |
1856 | ||
5eb8c768 | 1857 | static int __maybe_unused cio2_runtime_suspend(struct device *dev) |
c2a6a07a YZ | 1858 | { |
1859 | struct pci_dev *pci_dev = to_pci_dev(dev); | |
1860 | struct cio2_device *cio2 = pci_get_drvdata(pci_dev); | |
1861 | void __iomem *const base = cio2->base; | |
1862 | u16 pm; | |
1863 | ||
1864 | writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C); | |
1865 | dev_dbg(dev, "cio2 runtime suspend.\n"); | |
1866 | ||
1867 | pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm); | |
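	/* The shift pair clears the low PowerState bits; then request D3 */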
1868 | pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT; | |
1869 | pm |= CIO2_PMCSR_D3; | |
1870 | pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm); | |
1871 | ||
1872 | return 0; | |
1873 | } | |
1874 | ||
5eb8c768 | 1875 | static int __maybe_unused cio2_runtime_resume(struct device *dev) |
c2a6a07a YZ | 1876 | { |
1877 | struct pci_dev *pci_dev = to_pci_dev(dev); | |
1878 | struct cio2_device *cio2 = pci_get_drvdata(pci_dev); | |
1879 | void __iomem *const base = cio2->base; | |
1880 | u16 pm; | |
1881 | ||
1882 | writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C); | |
1883 | dev_dbg(dev, "cio2 runtime resume.\n"); | |
1884 | ||
1885 | pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm); | |
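	/* Clearing the PowerState field with nothing OR-ed back selects D0 */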
1886 | pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT; | |
1887 | pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm); | |
1888 | ||
1889 | return 0; | |
1890 | } | |
1891 | ||
1892 | /* | |
1893 | * Helper function to rotate all the elements of a circular buffer left | |
1894 | * by "start" positions | |
1895 | */ | |
1896 | static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start) | |
1897 | { | |
1898 | struct { | |
1899 | size_t begin, end; | |
1900 | } arr[2] = { | |
1901 | { 0, start - 1 }, | |
1902 | { start, elems - 1 }, | |
1903 | }; | |
1904 | ||
6afda56a | 1905 | #define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1) |
c2a6a07a YZ | 1906 | |
1907 | /* Loop as long as we have out-of-place entries */ | |
6afda56a | 1908 | while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) { |
c2a6a07a YZ | 1909 | size_t size0, i; |
1910 | ||
1911 | /* | |
1912 | * Find the number of entries that can be arranged on this | |
1913 | * iteration. | |
1914 | */ | |
6afda56a | 1915 | size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1])); |
c2a6a07a YZ | 1916 | |
1917 | /* Swap the entries in two parts of the array. */ | |
1918 | for (i = 0; i < size0; i++) { | |
1919 | u8 *d = ptr + elem_size * (arr[1].begin + i); | |
1920 | u8 *s = ptr + elem_size * (arr[0].begin + i); | |
1921 | size_t j; | |
1922 | ||
1923 | for (j = 0; j < elem_size; j++) | |
1924 | swap(d[j], s[j]); | |
1925 | } | |
1926 | ||
6afda56a | 1927 | if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) { |
c2a6a07a YZ | 1928 | /* The end of the first array remains unarranged. */ |
1929 | arr[0].begin += size0; | |
1930 | } else { | |
1931 | /* | |
1932 | * The first array is fully arranged so we proceed | |
1933 | * handling the next one. | |
1934 | */ | |
1935 | arr[0].begin = arr[1].begin; | |
1936 | arr[0].end = arr[1].begin + size0 - 1; | |
1937 | arr[1].begin += size0; | |
1938 | } | |
1939 | } | |
1940 | } | |
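/*
 * Worked example: arrange(ptr, size, 5, 2) rotates { A, B, C, D, E }
 * left by two positions:
 *
 *   swap two elements pairwise:  { C, D, A, B, E }   (size0 = 2)
 *   swap one remaining element:  { C, D, E, B, A }   (size0 = 1)
 *   swap one remaining element:  { C, D, E, A, B }   (done)
 */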
1941 | ||
1942 | static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q) | |
1943 | { | |
1944 | unsigned int i, j; | |
1945 | ||
1946 | for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS; | |
1947 | i++, j = (j + 1) % CIO2_MAX_BUFFERS) | |
1948 | if (q->bufs[j]) | |
1949 | break; | |
1950 | ||
1951 | if (i == CIO2_MAX_BUFFERS) | |
1952 | return; | |
1953 | ||
1954 | if (j) { | |
1955 | arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS, | |
1956 | CIO2_MAX_BUFFERS, j); | |
1957 | arrange(q->bufs, sizeof(struct cio2_buffer *), | |
1958 | CIO2_MAX_BUFFERS, j); | |
1959 | } | |
1960 | ||
1961 | /* | |
1962 | * DMA clears the valid bit when accessing the buffer. | |
1963 | * When the stream is stopped in the suspend callback, some buffers | |
1964 | * may be left in an invalid state. After resume, when the DMA reaches | |
1965 | * an invalid buffer, it halts and stops receiving new data. | |
1966 | * To avoid DMA halting, set the valid bit for all buffers in FBPT. | |
1967 | */ | |
1968 | for (i = 0; i < CIO2_MAX_BUFFERS; i++) | |
1969 | cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS); | |
1970 | } | |
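/*
 * Example: with four buffers and the oldest queued buffer at j = 2, both
 * q->bufs and the FBPT rotate from { 0, 1, 2, 3 } to { 2, 3, 0, 1 }, so
 * the oldest buffer sits in entry 0 when the DMA restarts.
 */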
1971 | ||
2086dd35 | 1972 | static int __maybe_unused cio2_suspend(struct device *dev) |
c2a6a07a YZ | 1973 | { |
1974 | struct pci_dev *pci_dev = to_pci_dev(dev); | |
1975 | struct cio2_device *cio2 = pci_get_drvdata(pci_dev); | |
1976 | struct cio2_queue *q = cio2->cur_queue; | |
1977 | ||
1978 | dev_dbg(dev, "cio2 suspend\n"); | |
1979 | if (!cio2->streaming) | |
1980 | return 0; | |
1981 | ||
1982 | /* Stop stream */ | |
1983 | cio2_hw_exit(cio2, q); | |
d69a5a2c | 1984 | synchronize_irq(pci_dev->irq); |
c2a6a07a YZ | 1985 | |
1986 | pm_runtime_force_suspend(dev); | |
1987 | ||
1988 | /* | |
1989 | * Upon resume, the hardware starts processing the FBPT entries from the | |
1990 | * beginning, so relocate the queued buffers to the FBPT head before suspend. | |
1991 | */ | |
1992 | cio2_fbpt_rearrange(cio2, q); | |
1993 | q->bufs_first = 0; | |
1994 | q->bufs_next = 0; | |
1995 | ||
1996 | return 0; | |
1997 | } | |
1998 | ||
2086dd35 | 1999 | static int __maybe_unused cio2_resume(struct device *dev) |
c2a6a07a YZ | 2000 | { |
2001 | struct pci_dev *pci_dev = to_pci_dev(dev); | |
2002 | struct cio2_device *cio2 = pci_get_drvdata(pci_dev); | |
2003 | int r = 0; | |
2004 | struct cio2_queue *q = cio2->cur_queue; | |
2005 | ||
2006 | dev_dbg(dev, "cio2 resume\n"); | |
2007 | if (!cio2->streaming) | |
2008 | return 0; | |
2009 | /* Start stream */ | |
2010 | r = pm_runtime_force_resume(&cio2->pci_dev->dev); | |
2011 | if (r < 0) { | |
2012 | dev_err(&cio2->pci_dev->dev, | |
2013 | "failed to set power state (%d)\n", r); | |
2014 | return r; | |
2015 | } | |
2016 | ||
2017 | r = cio2_hw_init(cio2, q); | |
2018 | if (r) | |
2019 | dev_err(dev, "fail to init cio2 hw\n"); | |
2020 | ||
2021 | return r; | |
2022 | } | |
2023 | ||
2024 | static const struct dev_pm_ops cio2_pm_ops = { | |
2025 | SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL) | |
2026 | SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume) | |
2027 | }; | |
2028 | ||
2029 | static const struct pci_device_id cio2_pci_id_table[] = { | |
2030 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) }, | |
2031 | { 0 } | |
2032 | }; | |
2033 | ||
2034 | MODULE_DEVICE_TABLE(pci, cio2_pci_id_table); | |
2035 | ||
2036 | static struct pci_driver cio2_pci_driver = { | |
2037 | .name = CIO2_NAME, | |
2038 | .id_table = cio2_pci_id_table, | |
2039 | .probe = cio2_pci_probe, | |
2040 | .remove = cio2_pci_remove, | |
2041 | .driver = { | |
2042 | .pm = &cio2_pm_ops, | |
2043 | }, | |
2044 | }; | |
2045 | ||
2046 | module_pci_driver(cio2_pci_driver); | |
2047 | ||
2048 | MODULE_AUTHOR("Tuukka Toivonen <[email protected]>"); | |
2049 | MODULE_AUTHOR("Tianshu Qiu <[email protected]>"); | |
2050 | MODULE_AUTHOR("Jian Xu Zheng <[email protected]>"); | |
2051 | MODULE_AUTHOR("Yuning Pu <[email protected]>"); | |
2052 | MODULE_AUTHOR("Yong Zhi <[email protected]>"); | |
2053 | MODULE_LICENSE("GPL v2"); | |
2054 | MODULE_DESCRIPTION("IPU3 CIO2 driver"); |