// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - 2018 Intel Corporation
 * Copyright 2017 Google LLC
 *
 * Based on Intel IPU4 driver.
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

#include "ipu3.h"
#include "ipu3-css-fw.h"
#include "ipu3-dmamap.h"
#include "ipu3-mmu.h"
#define IMGU_PCI_ID			0x1919
#define IMGU_PCI_BAR			0
#define IMGU_DMA_MASK			DMA_BIT_MASK(39)
#define IMGU_MAX_QUEUE_DEPTH		(2 + 2)
/*
 * Pre-allocated buffer sizes for IMGU dummy buffers. These values
 * should be tuned large enough to avoid buffer re-allocation while
 * streaming, in order to keep streaming latency low.
 */
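/*
 * The output and viewfinder sizes below correspond to a 4160x3120 output
 * frame and a 1920x1080 viewfinder frame at 12 bits per pixel (enough for
 * e.g. an NV12 buffer), so the pre-allocated dummy buffers should not
 * have to grow while streaming.
 */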
#define CSS_QUEUE_IN_BUF_SIZE		0
#define CSS_QUEUE_PARAMS_BUF_SIZE	0
#define CSS_QUEUE_OUT_BUF_SIZE		(4160 * 3120 * 12 / 8)
#define CSS_QUEUE_VF_BUF_SIZE		(1920 * 1080 * 12 / 8)
#define CSS_QUEUE_STAT_3A_BUF_SIZE	sizeof(struct ipu3_uapi_stats_3a)
static const size_t css_queue_buf_size_map[IPU3_CSS_QUEUES] = {
	[IPU3_CSS_QUEUE_IN] = CSS_QUEUE_IN_BUF_SIZE,
	[IPU3_CSS_QUEUE_PARAMS] = CSS_QUEUE_PARAMS_BUF_SIZE,
	[IPU3_CSS_QUEUE_OUT] = CSS_QUEUE_OUT_BUF_SIZE,
	[IPU3_CSS_QUEUE_VF] = CSS_QUEUE_VF_BUF_SIZE,
	[IPU3_CSS_QUEUE_STAT_3A] = CSS_QUEUE_STAT_3A_BUF_SIZE,
};
static const struct imgu_node_mapping imgu_node_map[IMGU_NODE_NUM] = {
	[IMGU_NODE_IN] = {IPU3_CSS_QUEUE_IN, "input"},
	[IMGU_NODE_PARAMS] = {IPU3_CSS_QUEUE_PARAMS, "parameters"},
	[IMGU_NODE_OUT] = {IPU3_CSS_QUEUE_OUT, "output"},
	[IMGU_NODE_VF] = {IPU3_CSS_QUEUE_VF, "viewfinder"},
	[IMGU_NODE_STAT_3A] = {IPU3_CSS_QUEUE_STAT_3A, "3a stat"},
};
unsigned int imgu_node_to_queue(unsigned int node)
{
	return imgu_node_map[node].css_queue;
}
unsigned int imgu_map_node(struct imgu_device *imgu, unsigned int css_queue)
{
	unsigned int i;

	for (i = 0; i < IMGU_NODE_NUM; i++)
		if (imgu_node_map[i].css_queue == css_queue)
			break;

	return i;
}
/**************** Dummy buffers ****************/
static void imgu_dummybufs_cleanup(struct imgu_device *imgu, unsigned int pipe)
{
	unsigned int i;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	for (i = 0; i < IPU3_CSS_QUEUES; i++)
		imgu_dmamap_free(imgu,
				 &imgu_pipe->queues[i].dmap);
}
static int imgu_dummybufs_preallocate(struct imgu_device *imgu,
				      unsigned int pipe)
{
	unsigned int i;
	size_t size;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	for (i = 0; i < IPU3_CSS_QUEUES; i++) {
		size = css_queue_buf_size_map[i];

		/*
		 * Do not enable dummy buffers for the master queue;
		 * real buffers from the user are always required there.
		 */
		if (i == IMGU_QUEUE_MASTER || size == 0)
			continue;

		if (!imgu_dmamap_alloc(imgu,
				       &imgu_pipe->queues[i].dmap, size)) {
			imgu_dummybufs_cleanup(imgu, pipe);
			return -ENOMEM;
		}
	}

	return 0;
}
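/*
 * Resize the pre-allocated dummy DMA areas to match the currently
 * negotiated format on each enabled, optional queue and initialize
 * IMGU_MAX_QUEUE_DEPTH CSS buffers per queue, all pointing at the same
 * dummy DMA address.
 */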
static int imgu_dummybufs_init(struct imgu_device *imgu, unsigned int pipe)
{
	const struct v4l2_pix_format_mplane *mpix;
	const struct v4l2_meta_format *meta;
	unsigned int i, k, node;
	size_t size;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	/* Allocate a dummy buffer for each queue where buffer is optional */
	for (i = 0; i < IPU3_CSS_QUEUES; i++) {
		node = imgu_map_node(imgu, i);
		if (!imgu_pipe->queue_enabled[node] || i == IMGU_QUEUE_MASTER)
			continue;

		if (!imgu_pipe->nodes[IMGU_NODE_VF].enabled &&
		    i == IPU3_CSS_QUEUE_VF)
			/*
			 * Do not enable dummy buffers for VF if it is not
			 * requested by the user.
			 */
			continue;

		meta = &imgu_pipe->nodes[node].vdev_fmt.fmt.meta;
		mpix = &imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp;

		if (node == IMGU_NODE_STAT_3A || node == IMGU_NODE_PARAMS)
			size = meta->buffersize;
		else
			size = mpix->plane_fmt[0].sizeimage;

		if (imgu_css_dma_buffer_resize(imgu,
					       &imgu_pipe->queues[i].dmap,
					       size)) {
			imgu_dummybufs_cleanup(imgu, pipe);
			return -ENOMEM;
		}

		for (k = 0; k < IMGU_MAX_QUEUE_DEPTH; k++)
			imgu_css_buf_init(&imgu_pipe->queues[i].dummybufs[k], i,
					  imgu_pipe->queues[i].dmap.daddr);
	}

	return 0;
}
/* May be called from atomic context */
static struct imgu_css_buffer *imgu_dummybufs_get(struct imgu_device *imgu,
						  int queue, unsigned int pipe)
{
	unsigned int i;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	/* Dummy buffers are not allocated for the master queue */
	if (queue == IPU3_CSS_QUEUE_IN)
		return NULL;

	if (WARN_ON(!imgu_pipe->queues[queue].dmap.vaddr))
		/* Buffer should not be allocated here */
		return NULL;

	for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
		if (imgu_css_buf_state(&imgu_pipe->queues[queue].dummybufs[i]) !=
		    IPU3_CSS_BUFFER_QUEUED)
			break;

	if (i == IMGU_MAX_QUEUE_DEPTH)
		return NULL;

	imgu_css_buf_init(&imgu_pipe->queues[queue].dummybufs[i], queue,
			  imgu_pipe->queues[queue].dmap.daddr);

	return &imgu_pipe->queues[queue].dummybufs[i];
}
/* Check if the given buffer is a dummy buffer */
static bool imgu_dummybufs_check(struct imgu_device *imgu,
				 struct imgu_css_buffer *buf,
				 unsigned int pipe)
{
	unsigned int i;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
		if (buf == &imgu_pipe->queues[buf->queue].dummybufs[i])
			break;

	return i < IMGU_MAX_QUEUE_DEPTH;
}
static void imgu_buffer_done(struct imgu_device *imgu, struct vb2_buffer *vb,
			     enum vb2_buffer_state state)
{
	mutex_lock(&imgu->lock);
	imgu_v4l2_buffer_done(vb, state);
	mutex_unlock(&imgu->lock);
}
static struct imgu_css_buffer *imgu_queue_getbuf(struct imgu_device *imgu,
						 unsigned int node,
						 unsigned int pipe)
{
	struct imgu_buffer *buf;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	if (WARN_ON(node >= IMGU_NODE_NUM))
		return NULL;

	/* Find the first free buffer from the node */
	list_for_each_entry(buf, &imgu_pipe->nodes[node].buffers, vid_buf.list) {
		if (imgu_css_buf_state(&buf->css_buf) == IPU3_CSS_BUFFER_NEW)
			return &buf->css_buf;
	}

	/* There were no free buffers, try to return a dummy buffer */
	return imgu_dummybufs_get(imgu, imgu_node_map[node].css_queue, pipe);
}
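/*
 * The CSS firmware is fed complete buffer sets. When user space has not
 * queued a buffer on an optional queue, one of the pre-allocated dummy
 * buffers above is substituted so that a full buffer set can still be
 * queued to the firmware.
 */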
/*
 * Queue as many buffers to CSS as possible. If all buffers don't fit into
 * CSS buffer queues, they remain unqueued and will be queued later.
 */
int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe)
{
	unsigned int node;
	int r = 0;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	if (!imgu_css_is_streaming(&imgu->css))
		return 0;

	dev_dbg(&imgu->pci_dev->dev, "Queue buffers to pipe %d", pipe);
	mutex_lock(&imgu->lock);

	if (!imgu_css_pipe_queue_empty(&imgu->css, pipe)) {
		mutex_unlock(&imgu->lock);
		return 0;
	}

	/* Buffer set is queued to FW only when input buffer is ready */
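	/*
	 * Walk the nodes from last to first, wrapping around, and queue one
	 * buffer per node for as long as an input buffer is available, so
	 * that the firmware always receives complete buffer sets.
	 */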
	for (node = IMGU_NODE_NUM - 1;
	     imgu_queue_getbuf(imgu, IMGU_NODE_IN, pipe);
	     node = node ? node - 1 : IMGU_NODE_NUM - 1) {
		if (node == IMGU_NODE_VF &&
		    !imgu_pipe->nodes[IMGU_NODE_VF].enabled) {
			dev_warn(&imgu->pci_dev->dev,
				 "VF not enabled, ignore queue");
			continue;
		} else if (node == IMGU_NODE_PARAMS &&
			   imgu_pipe->nodes[node].enabled) {
			struct vb2_buffer *vb;
			struct imgu_vb2_buffer *ivb;

			/* No parameters for this frame */
			if (list_empty(&imgu_pipe->nodes[node].buffers))
				continue;

			ivb = list_first_entry(&imgu_pipe->nodes[node].buffers,
					       struct imgu_vb2_buffer, list);
			list_del(&ivb->list);
			vb = &ivb->vbb.vb2_buf;
			r = imgu_css_set_parameters(&imgu->css, pipe,
						    vb2_plane_vaddr(vb, 0));
			if (r) {
				vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
				dev_warn(&imgu->pci_dev->dev,
					 "set parameters failed.");
				continue;
			}

			vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
			dev_dbg(&imgu->pci_dev->dev,
				"queue user parameters %d to css.", vb->index);
		} else if (imgu_pipe->queue_enabled[node]) {
			struct imgu_css_buffer *buf =
				imgu_queue_getbuf(imgu, node, pipe);
			struct imgu_buffer *ibuf = NULL;
			bool dummy;

			if (!buf)
				break;

			r = imgu_css_buf_queue(&imgu->css, pipe, buf);
			if (r)
				break;
			dummy = imgu_dummybufs_check(imgu, buf, pipe);
			if (!dummy)
				ibuf = container_of(buf, struct imgu_buffer,
						    css_buf);
			dev_dbg(&imgu->pci_dev->dev,
				"queue %s %s buffer %u to css da: 0x%08x\n",
				dummy ? "dummy" : "user",
				imgu_node_map[node].name,
				dummy ? 0 : ibuf->vid_buf.vbb.vb2_buf.index,
				(u32)buf->daddr);
		}
	}
	mutex_unlock(&imgu->lock);
	if (r && r != -EBUSY)
		goto failed;

	return 0;

failed:
	/*
	 * On error, mark all buffers that are not yet queued to CSS
	 * as failed.
	 */
	dev_err(&imgu->pci_dev->dev,
		"failed to queue buffer to CSS on queue %i (%d)\n",
		node, r);

	if (initial)
		/* If we were called from streamon(), no need to finish bufs */
		return r;

	for (node = 0; node < IMGU_NODE_NUM; node++) {
		struct imgu_buffer *buf, *buf0;

		if (!imgu_pipe->queue_enabled[node])
			continue;	/* Skip disabled queues */

		mutex_lock(&imgu->lock);
		list_for_each_entry_safe(buf, buf0,
					 &imgu_pipe->nodes[node].buffers,
					 vid_buf.list) {
			if (imgu_css_buf_state(&buf->css_buf) ==
			    IPU3_CSS_BUFFER_QUEUED)
				continue;	/* Was already queued, skip */

			imgu_v4l2_buffer_done(&buf->vid_buf.vbb.vb2_buf,
					      VB2_BUF_STATE_ERROR);
		}
		mutex_unlock(&imgu->lock);
	}

	return r;
}
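/*
 * Power up the IMGU, selecting the operating frequency based on the
 * largest configured input resolution, then resume the IMGU MMU.
 */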
static int imgu_powerup(struct imgu_device *imgu)
{
	int r;
	unsigned int pipe;
	unsigned int freq = 200;
	struct v4l2_mbus_framefmt *fmt;

	/*
	 * If the input is larger than 2048x1152, ask the IMGU to work at
	 * the higher frequency.
	 */
	for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
		fmt = &imgu->imgu_pipe[pipe].nodes[IMGU_NODE_IN].pad_fmt;
		dev_dbg(&imgu->pci_dev->dev, "pipe %u input format = %ux%u",
			pipe, fmt->width, fmt->height);
		if ((fmt->width * fmt->height) >= (2048 * 1152))
			freq = 450;
	}

	r = imgu_css_set_powerup(&imgu->pci_dev->dev, imgu->base, freq);
	if (r)
		return r;

	imgu_mmu_resume(imgu->mmu);

	return 0;
}

static void imgu_powerdown(struct imgu_device *imgu)
{
	imgu_mmu_suspend(imgu->mmu);
	imgu_css_set_powerdown(&imgu->pci_dev->dev, imgu->base);
}
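/*
 * Start or stop streaming. On stream off, new buffer queueing is blocked
 * via qbuf_barrier while CSS streaming is stopped and the device powered
 * down. On stream on, the device is powered up, CSS streaming is started
 * and dummy plus initial buffers are queued for every enabled pipe.
 */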
int imgu_s_stream(struct imgu_device *imgu, int enable)
{
	struct device *dev = &imgu->pci_dev->dev;
	int r, pipe;

	if (!enable) {
		/* Stop streaming */
		dev_dbg(dev, "stream off\n");
		/* Block new buffers from being queued to CSS. */
		atomic_set(&imgu->qbuf_barrier, 1);
		imgu_css_stop_streaming(&imgu->css);
		synchronize_irq(imgu->pci_dev->irq);
		atomic_set(&imgu->qbuf_barrier, 0);
		imgu_powerdown(imgu);
		pm_runtime_put(&imgu->pci_dev->dev);

		return 0;
	}

	/* Set power */
	r = pm_runtime_resume_and_get(dev);
	if (r < 0) {
		dev_err(dev, "failed to set imgu power\n");
		return r;
	}

	r = imgu_powerup(imgu);
	if (r) {
		dev_err(dev, "failed to power up imgu\n");
		pm_runtime_put(dev);
		return r;
	}

	/* Start CSS streaming */
	r = imgu_css_start_streaming(&imgu->css);
	if (r) {
		dev_err(dev, "failed to start css streaming (%d)", r);
		goto fail_start_streaming;
	}

	for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
		/* Initialize dummy buffers */
		r = imgu_dummybufs_init(imgu, pipe);
		if (r) {
			dev_err(dev, "failed to initialize dummy buffers (%d)", r);
			goto fail_dummybufs;
		}

		/* Queue as many buffers from the queue as possible */
		r = imgu_queue_buffers(imgu, true, pipe);
		if (r) {
			dev_err(dev, "failed to queue initial buffers (%d)", r);
			goto fail_queueing;
		}
	}

	return 0;
fail_queueing:
	for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
		imgu_dummybufs_cleanup(imgu, pipe);
fail_dummybufs:
	imgu_css_stop_streaming(&imgu->css);
fail_start_streaming:
	pm_runtime_put(dev);

	return r;
}
static void imgu_video_nodes_exit(struct imgu_device *imgu)
{
	int i;

	for (i = 0; i < IMGU_MAX_PIPE_NUM; i++)
		imgu_dummybufs_cleanup(imgu, i);

	imgu_v4l2_unregister(imgu);
}
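/*
 * Set up the per-pipe video nodes: node names, direction and default
 * formats, register the V4L2/media devices and pre-allocate the dummy
 * buffers used when user space does not provide one.
 */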
static int imgu_video_nodes_init(struct imgu_device *imgu)
{
	struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
	struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
	struct imgu_media_pipe *imgu_pipe;
	unsigned int i, j;
	int r;

	imgu->buf_struct_size = sizeof(struct imgu_buffer);

	for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
		imgu_pipe = &imgu->imgu_pipe[j];

		for (i = 0; i < IMGU_NODE_NUM; i++) {
			imgu_pipe->nodes[i].name = imgu_node_map[i].name;
			imgu_pipe->nodes[i].output = i < IMGU_QUEUE_FIRST_INPUT;
			imgu_pipe->nodes[i].enabled = false;

			if (i != IMGU_NODE_PARAMS && i != IMGU_NODE_STAT_3A)
				fmts[imgu_node_map[i].css_queue] =
					&imgu_pipe->nodes[i].vdev_fmt.fmt.pix_mp;
			atomic_set(&imgu_pipe->nodes[i].sequence, 0);
		}
	}

	r = imgu_v4l2_register(imgu);
	if (r)
		return r;

	/* Set initial formats and initialize formats of video nodes */
	for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
		imgu_pipe = &imgu->imgu_pipe[j];

		rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_pipe->imgu_sd.rect.eff;
		rects[IPU3_CSS_RECT_BDS] = &imgu_pipe->imgu_sd.rect.bds;
		imgu_css_fmt_set(&imgu->css, fmts, rects, j);

		/* Pre-allocate dummy buffers */
		r = imgu_dummybufs_preallocate(imgu, j);
		if (r) {
			dev_err(&imgu->pci_dev->dev,
				"failed to pre-allocate dummy buffers (%d)", r);
			goto out_cleanup;
		}
	}

	return 0;

out_cleanup:
	imgu_video_nodes_exit(imgu);

	return r;
}
/**************** PCI interface ****************/
static irqreturn_t imgu_isr_threaded(int irq, void *imgu_ptr)
{
	struct imgu_device *imgu = imgu_ptr;
	struct imgu_media_pipe *imgu_pipe;
	int p;

	/* Dequeue / queue buffers */
	do {
		u64 ns = ktime_get_ns();
		struct imgu_css_buffer *b;
		struct imgu_buffer *buf = NULL;
		unsigned int node, pipe;
		bool dummy;

		do {
			mutex_lock(&imgu->lock);
			b = imgu_css_buf_dequeue(&imgu->css);
			mutex_unlock(&imgu->lock);
		} while (PTR_ERR(b) == -EAGAIN);

		if (IS_ERR(b)) {
			if (PTR_ERR(b) != -EBUSY)	/* All done */
				dev_err(&imgu->pci_dev->dev,
					"failed to dequeue buffers (%ld)\n",
					PTR_ERR(b));
			break;
		}

		node = imgu_map_node(imgu, b->queue);
		pipe = b->pipe;
		dummy = imgu_dummybufs_check(imgu, b, pipe);
		if (!dummy)
			buf = container_of(b, struct imgu_buffer, css_buf);
		dev_dbg(&imgu->pci_dev->dev,
			"dequeue %s %s buffer %d daddr 0x%x from css\n",
			dummy ? "dummy" : "user",
			imgu_node_map[node].name,
			dummy ? 0 : buf->vid_buf.vbb.vb2_buf.index,
			(u32)b->daddr);

		if (dummy)
			/* It was a dummy buffer, skip it */
			continue;

		/* Fill vb2 buffer entries and tell it's ready */
		imgu_pipe = &imgu->imgu_pipe[pipe];
		if (!imgu_pipe->nodes[node].output) {
			buf->vid_buf.vbb.vb2_buf.timestamp = ns;
			buf->vid_buf.vbb.field = V4L2_FIELD_NONE;
			buf->vid_buf.vbb.sequence =
				atomic_inc_return(
				&imgu_pipe->nodes[node].sequence);
			dev_dbg(&imgu->pci_dev->dev, "vb2 buffer sequence %d",
				buf->vid_buf.vbb.sequence);
		}
		imgu_buffer_done(imgu, &buf->vid_buf.vbb.vb2_buf,
				 imgu_css_buf_state(&buf->css_buf) ==
				 IPU3_CSS_BUFFER_DONE ?
				 VB2_BUF_STATE_DONE :
				 VB2_BUF_STATE_ERROR);
		mutex_lock(&imgu->lock);
		if (imgu_css_queue_empty(&imgu->css))
			wake_up_all(&imgu->buf_drain_wq);
		mutex_unlock(&imgu->lock);
	} while (1);

	/*
	 * Try to queue more buffers for CSS.
	 * qbuf_barrier is used to prevent new buffers
	 * from being queued to CSS.
	 */
	if (!atomic_read(&imgu->qbuf_barrier))
		for_each_set_bit(p, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
			imgu_queue_buffers(imgu, false, p);

	return IRQ_HANDLED;
}
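/*
 * Hard IRQ handler: only acknowledge the CSS interrupt and wake the
 * threaded handler above, which does the actual buffer completion work.
 */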
static irqreturn_t imgu_isr(int irq, void *imgu_ptr)
{
	struct imgu_device *imgu = imgu_ptr;

	/* Acknowledge the interrupt */
	if (imgu_css_irq_ack(&imgu->css) < 0)
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}
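/*
 * Enable MSI and make sure memory space access and bus mastering are
 * enabled while legacy INTx interrupts are disabled.
 */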
static int imgu_pci_config_setup(struct pci_dev *dev)
{
	u16 pci_command;
	int r = pci_enable_msi(dev);

	if (r) {
		dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
		return r;
	}

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
		       PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(dev, PCI_COMMAND, pci_command);

	return 0;
}
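/*
 * Probe: map the IMGU BAR, set the DMA mask, power up the CSS, then
 * initialize the MMU, DMA mapping and CSS layers, register the video
 * nodes and finally request the (MSI) interrupt and enable runtime PM.
 */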
static int imgu_pci_probe(struct pci_dev *pci_dev,
			  const struct pci_device_id *id)
{
	struct imgu_device *imgu;
	phys_addr_t phys;
	unsigned long phys_len;
	void __iomem *const *iomap;
	int r;

	imgu = devm_kzalloc(&pci_dev->dev, sizeof(*imgu), GFP_KERNEL);
	if (!imgu)
		return -ENOMEM;

	imgu->pci_dev = pci_dev;

	r = pcim_enable_device(pci_dev);
	if (r) {
		dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
		return r;
	}

	dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
		 pci_dev->device, pci_dev->revision);

	phys = pci_resource_start(pci_dev, IMGU_PCI_BAR);
	phys_len = pci_resource_len(pci_dev, IMGU_PCI_BAR);

	r = pcim_iomap_regions(pci_dev, 1 << IMGU_PCI_BAR, pci_name(pci_dev));
	if (r) {
		dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
		return r;
	}
	dev_info(&pci_dev->dev, "physical base address %pap, %lu bytes\n",
		 &phys, phys_len);

	iomap = pcim_iomap_table(pci_dev);
	if (!iomap) {
		dev_err(&pci_dev->dev, "failed to iomap table\n");
		return -ENODEV;
	}

	imgu->base = iomap[IMGU_PCI_BAR];

	pci_set_drvdata(pci_dev, imgu);

	pci_set_master(pci_dev);

	r = dma_coerce_mask_and_coherent(&pci_dev->dev, IMGU_DMA_MASK);
	if (r) {
		dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
		return r;
	}

	r = imgu_pci_config_setup(pci_dev);
	if (r)
		return r;

	mutex_init(&imgu->lock);
	mutex_init(&imgu->streaming_lock);
	atomic_set(&imgu->qbuf_barrier, 0);
	init_waitqueue_head(&imgu->buf_drain_wq);

	r = imgu_css_set_powerup(&pci_dev->dev, imgu->base, 200);
	if (r) {
		dev_err(&pci_dev->dev,
			"failed to power up CSS (%d)\n", r);
		goto out_mutex_destroy;
	}

	imgu->mmu = imgu_mmu_init(&pci_dev->dev, imgu->base);
	if (IS_ERR(imgu->mmu)) {
		r = PTR_ERR(imgu->mmu);
		dev_err(&pci_dev->dev, "failed to initialize MMU (%d)\n", r);
		goto out_css_powerdown;
	}

	r = imgu_dmamap_init(imgu);
	if (r) {
		dev_err(&pci_dev->dev,
			"failed to initialize DMA mapping (%d)\n", r);
		goto out_mmu_exit;
	}

	/* ISP programming */
	r = imgu_css_init(&pci_dev->dev, &imgu->css, imgu->base, phys_len);
	if (r) {
		dev_err(&pci_dev->dev, "failed to initialize CSS (%d)\n", r);
		goto out_dmamap_exit;
	}

	/* v4l2 sub-device registration */
	r = imgu_video_nodes_init(imgu);
	if (r) {
		dev_err(&pci_dev->dev, "failed to create V4L2 devices (%d)\n",
			r);
		goto out_css_cleanup;
	}

	r = devm_request_threaded_irq(&pci_dev->dev, pci_dev->irq,
				      imgu_isr, imgu_isr_threaded,
				      IRQF_SHARED, IMGU_NAME, imgu);
	if (r) {
		dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
		goto out_video_exit;
	}

	pm_runtime_put_noidle(&pci_dev->dev);
	pm_runtime_allow(&pci_dev->dev);

	return 0;

out_video_exit:
	imgu_video_nodes_exit(imgu);
out_css_cleanup:
	imgu_css_cleanup(&imgu->css);
out_dmamap_exit:
	imgu_dmamap_exit(imgu);
out_mmu_exit:
	imgu_mmu_exit(imgu->mmu);
out_css_powerdown:
	imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
out_mutex_destroy:
	mutex_destroy(&imgu->streaming_lock);
	mutex_destroy(&imgu->lock);

	return r;
}
static void imgu_pci_remove(struct pci_dev *pci_dev)
{
	struct imgu_device *imgu = pci_get_drvdata(pci_dev);

	pm_runtime_forbid(&pci_dev->dev);
	pm_runtime_get_noresume(&pci_dev->dev);

	imgu_video_nodes_exit(imgu);
	imgu_css_cleanup(&imgu->css);
	imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
	imgu_dmamap_exit(imgu);
	imgu_mmu_exit(imgu->mmu);
	mutex_destroy(&imgu->streaming_lock);
	mutex_destroy(&imgu->lock);
}
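/*
 * System suspend/resume: if streaming was active at suspend time, block
 * new buffer queueing, wait for in-flight buffers to drain from CSS,
 * stop streaming and power down; on resume, power back up, restart CSS
 * streaming and re-queue buffers for every enabled pipe.
 */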
static int __maybe_unused imgu_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct imgu_device *imgu = pci_get_drvdata(pci_dev);

	imgu->suspend_in_stream = imgu_css_is_streaming(&imgu->css);
	if (!imgu->suspend_in_stream)
		goto out;
	/* Block new buffers from being queued to CSS. */
	atomic_set(&imgu->qbuf_barrier, 1);
	/*
	 * Wait for the currently running irq handler to be done so that
	 * no new buffers will be queued to fw later.
	 */
	synchronize_irq(pci_dev->irq);
	/* Wait until all buffers in CSS are done. */
	if (!wait_event_timeout(imgu->buf_drain_wq,
	    imgu_css_queue_empty(&imgu->css), msecs_to_jiffies(1000)))
		dev_err(dev, "wait buffer drain timeout.\n");

	imgu_css_stop_streaming(&imgu->css);
	atomic_set(&imgu->qbuf_barrier, 0);
	imgu_powerdown(imgu);
	pm_runtime_force_suspend(dev);
out:
	return 0;
}
static int __maybe_unused imgu_resume(struct device *dev)
{
	struct imgu_device *imgu = dev_get_drvdata(dev);
	int r = 0;
	unsigned int pipe;

	if (!imgu->suspend_in_stream)
		goto out;

	pm_runtime_force_resume(dev);

	r = imgu_powerup(imgu);
	if (r) {
		dev_err(dev, "failed to power up imgu\n");
		goto out;
	}

	/* Start CSS streaming */
	r = imgu_css_start_streaming(&imgu->css);
	if (r) {
		dev_err(dev, "failed to resume css streaming (%d)", r);
		goto out;
	}

	for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
		r = imgu_queue_buffers(imgu, true, pipe);
		if (r)
			dev_err(dev, "failed to queue buffers to pipe %d (%d)",
				pipe, r);
	}

out:
	return r;
}
/*
 * The PCI runtime PM framework checks for the existence of driver runtime
 * PM callbacks. Provide a dummy callback here to avoid runtime PM going
 * into an error state.
 */
static __maybe_unused int imgu_rpm_dummy_cb(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops imgu_pm_ops = {
	SET_RUNTIME_PM_OPS(&imgu_rpm_dummy_cb, &imgu_rpm_dummy_cb, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(&imgu_suspend, &imgu_resume)
};
static const struct pci_device_id imgu_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, IMGU_PCI_ID) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, imgu_pci_tbl);

static struct pci_driver imgu_pci_driver = {
	.name = IMGU_NAME,
	.id_table = imgu_pci_tbl,
	.probe = imgu_pci_probe,
	.remove = imgu_pci_remove,
	.driver = {
		.pm = &imgu_pm_ops,
	},
};

module_pci_driver(imgu_pci_driver);
MODULE_AUTHOR("Tuukka Toivonen");
MODULE_AUTHOR("Jian Xu Zheng");
MODULE_AUTHOR("Yuning Pu");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel ipu3_imgu PCI driver");
MODULE_FIRMWARE(IMGU_FW_NAME);
MODULE_FIRMWARE(IMGU_FW_NAME_20161208);
MODULE_FIRMWARE(IMGU_FW_NAME_IPU_20161208);