// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - 2018 Intel Corporation
 * Copyright 2017 Google LLC
 *
 * Based on Intel IPU4 driver.
 *
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

#include "ipu3.h"
#include "ipu3-css-fw.h"
#include "ipu3-dmamap.h"
#include "ipu3-mmu.h"

#define IMGU_PCI_ID                     0x1919
#define IMGU_PCI_BAR                    0
#define IMGU_DMA_MASK                   DMA_BIT_MASK(39)
#define IMGU_MAX_QUEUE_DEPTH            (2 + 2)

/*
 * Pre-allocated buffer sizes for the IMGU dummy buffers. These values
 * should be large enough to avoid buffer re-allocation while streaming,
 * in order to keep streaming latency low.
 */
#define CSS_QUEUE_IN_BUF_SIZE           0
#define CSS_QUEUE_PARAMS_BUF_SIZE       0
#define CSS_QUEUE_OUT_BUF_SIZE          (4160 * 3120 * 12 / 8)
#define CSS_QUEUE_VF_BUF_SIZE           (1920 * 1080 * 12 / 8)
#define CSS_QUEUE_STAT_3A_BUF_SIZE      sizeof(struct ipu3_uapi_stats_3a)

static const size_t css_queue_buf_size_map[IPU3_CSS_QUEUES] = {
        [IPU3_CSS_QUEUE_IN] = CSS_QUEUE_IN_BUF_SIZE,
        [IPU3_CSS_QUEUE_PARAMS] = CSS_QUEUE_PARAMS_BUF_SIZE,
        [IPU3_CSS_QUEUE_OUT] = CSS_QUEUE_OUT_BUF_SIZE,
        [IPU3_CSS_QUEUE_VF] = CSS_QUEUE_VF_BUF_SIZE,
        [IPU3_CSS_QUEUE_STAT_3A] = CSS_QUEUE_STAT_3A_BUF_SIZE,
};

static const struct imgu_node_mapping imgu_node_map[IMGU_NODE_NUM] = {
        [IMGU_NODE_IN] = {IPU3_CSS_QUEUE_IN, "input"},
        [IMGU_NODE_PARAMS] = {IPU3_CSS_QUEUE_PARAMS, "parameters"},
        [IMGU_NODE_OUT] = {IPU3_CSS_QUEUE_OUT, "output"},
        [IMGU_NODE_VF] = {IPU3_CSS_QUEUE_VF, "viewfinder"},
        [IMGU_NODE_STAT_3A] = {IPU3_CSS_QUEUE_STAT_3A, "3a stat"},
};

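/* Return the CSS queue used by the given video node. */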
unsigned int imgu_node_to_queue(unsigned int node)
{
        return imgu_node_map[node].css_queue;
}

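/*
 * Map a CSS queue back to its video node index. Returns IMGU_NODE_NUM if
 * no node uses the given queue.
 */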
unsigned int imgu_map_node(struct imgu_device *imgu, unsigned int css_queue)
{
        unsigned int i;

        for (i = 0; i < IMGU_NODE_NUM; i++)
                if (imgu_node_map[i].css_queue == css_queue)
                        break;

        return i;
}

/**************** Dummy buffers ****************/

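/* Free the dummy buffers that were pre-allocated for the given pipe. */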
static void imgu_dummybufs_cleanup(struct imgu_device *imgu, unsigned int pipe)
{
        unsigned int i;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        for (i = 0; i < IPU3_CSS_QUEUES; i++)
                imgu_dmamap_free(imgu,
                                 &imgu_pipe->queues[i].dmap);
}

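/*
 * Pre-allocate dummy buffers for the optional queues of a pipe, using the
 * worst-case sizes from css_queue_buf_size_map. The master (input) queue
 * never gets a dummy buffer.
 */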
static int imgu_dummybufs_preallocate(struct imgu_device *imgu,
                                      unsigned int pipe)
{
        unsigned int i;
        size_t size;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        for (i = 0; i < IPU3_CSS_QUEUES; i++) {
                size = css_queue_buf_size_map[i];
                /*
                 * Do not enable dummy buffers for the master queue;
                 * always require that real buffers from the user are
                 * available.
                 */
                if (i == IMGU_QUEUE_MASTER || size == 0)
                        continue;

                if (!imgu_dmamap_alloc(imgu,
                                       &imgu_pipe->queues[i].dmap, size)) {
                        imgu_dummybufs_cleanup(imgu, pipe);
                        return -ENOMEM;
                }
        }

        return 0;
}

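/*
 * Resize the pre-allocated dummy buffers to match the currently configured
 * format of each enabled queue and initialize the per-queue dummy CSS
 * buffers that point at them.
 */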
static int imgu_dummybufs_init(struct imgu_device *imgu, unsigned int pipe)
{
        const struct v4l2_pix_format_mplane *mpix;
        const struct v4l2_meta_format   *meta;
        unsigned int i, k, node;
        size_t size;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        /* Allocate a dummy buffer for each queue where the buffer is optional */
        for (i = 0; i < IPU3_CSS_QUEUES; i++) {
                node = imgu_map_node(imgu, i);
                if (!imgu_pipe->queue_enabled[node] || i == IMGU_QUEUE_MASTER)
                        continue;

                if (!imgu_pipe->nodes[IMGU_NODE_VF].enabled &&
                    i == IPU3_CSS_QUEUE_VF)
                        /*
                         * Do not enable dummy buffers for VF if it is not
                         * requested by the user.
                         */
                        continue;

                meta = &imgu_pipe->nodes[node].vdev_fmt.fmt.meta;
                mpix = &imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp;

                if (node == IMGU_NODE_STAT_3A || node == IMGU_NODE_PARAMS)
                        size = meta->buffersize;
                else
                        size = mpix->plane_fmt[0].sizeimage;

                if (imgu_css_dma_buffer_resize(imgu,
                                               &imgu_pipe->queues[i].dmap,
                                               size)) {
                        imgu_dummybufs_cleanup(imgu, pipe);
                        return -ENOMEM;
                }

                for (k = 0; k < IMGU_MAX_QUEUE_DEPTH; k++)
                        imgu_css_buf_init(&imgu_pipe->queues[i].dummybufs[k], i,
                                          imgu_pipe->queues[i].dmap.daddr);
        }

        return 0;
}

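/*
 * Return a dummy buffer of the given queue that is not currently queued to
 * CSS, or NULL if none is available (or the queue has no dummy buffers).
 */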
/* May be called from atomic context */
static struct imgu_css_buffer *imgu_dummybufs_get(struct imgu_device *imgu,
                                                   int queue, unsigned int pipe)
{
        unsigned int i;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        /* Dummy buffers are not allocated for the master queue */
        if (queue == IPU3_CSS_QUEUE_IN)
                return NULL;

        if (WARN_ON(!imgu_pipe->queues[queue].dmap.vaddr))
                /* Buffer should not be allocated here */
                return NULL;

        for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
                if (imgu_css_buf_state(&imgu_pipe->queues[queue].dummybufs[i]) !=
                        IPU3_CSS_BUFFER_QUEUED)
                        break;

        if (i == IMGU_MAX_QUEUE_DEPTH)
                return NULL;

        imgu_css_buf_init(&imgu_pipe->queues[queue].dummybufs[i], queue,
                          imgu_pipe->queues[queue].dmap.daddr);

        return &imgu_pipe->queues[queue].dummybufs[i];
}

/* Check if the given buffer is a dummy buffer */
static bool imgu_dummybufs_check(struct imgu_device *imgu,
                                 struct imgu_css_buffer *buf,
                                 unsigned int pipe)
{
        unsigned int i;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
                if (buf == &imgu_pipe->queues[buf->queue].dummybufs[i])
                        break;

        return i < IMGU_MAX_QUEUE_DEPTH;
}

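/* Complete a vb2 buffer towards the V4L2 layer while holding imgu->lock. */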
static void imgu_buffer_done(struct imgu_device *imgu, struct vb2_buffer *vb,
                             enum vb2_buffer_state state)
{
        mutex_lock(&imgu->lock);
        imgu_v4l2_buffer_done(vb, state);
        mutex_unlock(&imgu->lock);
}

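/*
 * Find the first unqueued user buffer of the given node, falling back to a
 * dummy buffer if the node has none available.
 */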
static struct imgu_css_buffer *imgu_queue_getbuf(struct imgu_device *imgu,
                                                 unsigned int node,
                                                 unsigned int pipe)
{
        struct imgu_buffer *buf;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        if (WARN_ON(node >= IMGU_NODE_NUM))
                return NULL;

        /* Find the first free buffer from the node */
        list_for_each_entry(buf, &imgu_pipe->nodes[node].buffers, vid_buf.list) {
                if (imgu_css_buf_state(&buf->css_buf) == IPU3_CSS_BUFFER_NEW)
                        return &buf->css_buf;
        }

        /* There were no free buffers, try to return a dummy buffer */
        return imgu_dummybufs_get(imgu, imgu_node_map[node].css_queue, pipe);
}

/*
 * Queue as many buffers to CSS as possible. If not all buffers fit into the
 * CSS buffer queues, they remain unqueued and will be queued later.
 */
int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe)
{
        unsigned int node;
        int r = 0;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        if (!imgu_css_is_streaming(&imgu->css))
                return 0;

        dev_dbg(&imgu->pci_dev->dev, "Queue buffers to pipe %d", pipe);
        mutex_lock(&imgu->lock);

        if (!imgu_css_pipe_queue_empty(&imgu->css, pipe)) {
                mutex_unlock(&imgu->lock);
                return 0;
        }

        /* A buffer set is queued to the FW only when the input buffer is ready */
        for (node = IMGU_NODE_NUM - 1;
             imgu_queue_getbuf(imgu, IMGU_NODE_IN, pipe);
             node = node ? node - 1 : IMGU_NODE_NUM - 1) {
                if (node == IMGU_NODE_VF &&
                    !imgu_pipe->nodes[IMGU_NODE_VF].enabled) {
                        dev_warn(&imgu->pci_dev->dev,
                                 "Vf not enabled, ignore queue");
                        continue;
                } else if (node == IMGU_NODE_PARAMS &&
                           imgu_pipe->nodes[node].enabled) {
                        struct vb2_buffer *vb;
                        struct imgu_vb2_buffer *ivb;

                        /* No parameters for this frame */
                        if (list_empty(&imgu_pipe->nodes[node].buffers))
                                continue;

                        ivb = list_first_entry(&imgu_pipe->nodes[node].buffers,
                                               struct imgu_vb2_buffer, list);
                        list_del(&ivb->list);
                        vb = &ivb->vbb.vb2_buf;
                        r = imgu_css_set_parameters(&imgu->css, pipe,
                                                    vb2_plane_vaddr(vb, 0));
                        if (r) {
                                vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
                                dev_warn(&imgu->pci_dev->dev,
                                         "set parameters failed.");
                                continue;
                        }

                        vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
                        dev_dbg(&imgu->pci_dev->dev,
                                "queue user parameters %d to css.", vb->index);
                } else if (imgu_pipe->queue_enabled[node]) {
                        struct imgu_css_buffer *buf =
                                imgu_queue_getbuf(imgu, node, pipe);
                        struct imgu_buffer *ibuf = NULL;
                        bool dummy;

                        if (!buf)
                                break;

                        r = imgu_css_buf_queue(&imgu->css, pipe, buf);
                        if (r)
                                break;
                        dummy = imgu_dummybufs_check(imgu, buf, pipe);
                        if (!dummy)
                                ibuf = container_of(buf, struct imgu_buffer,
                                                    css_buf);
                        dev_dbg(&imgu->pci_dev->dev,
                                "queue %s %s buffer %u to css da: 0x%08x\n",
                                dummy ? "dummy" : "user",
                                imgu_node_map[node].name,
                                dummy ? 0 : ibuf->vid_buf.vbb.vb2_buf.index,
                                (u32)buf->daddr);
                }
        }
        mutex_unlock(&imgu->lock);

        if (r && r != -EBUSY)
                goto failed;

        return 0;

failed:
        /*
         * On error, mark as failed all buffers that are not yet queued to
         * CSS.
         */
        dev_err(&imgu->pci_dev->dev,
                "failed to queue buffer to CSS on queue %i (%d)\n",
                node, r);

        if (initial)
                /* If we were called from streamon(), no need to finish bufs */
                return r;

        for (node = 0; node < IMGU_NODE_NUM; node++) {
                struct imgu_buffer *buf, *buf0;

                if (!imgu_pipe->queue_enabled[node])
                        continue;       /* Skip disabled queues */

                mutex_lock(&imgu->lock);
                list_for_each_entry_safe(buf, buf0,
                                         &imgu_pipe->nodes[node].buffers,
                                         vid_buf.list) {
                        if (imgu_css_buf_state(&buf->css_buf) ==
                            IPU3_CSS_BUFFER_QUEUED)
                                continue;       /* Was already queued, skip */

                        imgu_v4l2_buffer_done(&buf->vid_buf.vbb.vb2_buf,
                                              VB2_BUF_STATE_ERROR);
                }
                mutex_unlock(&imgu->lock);
        }

        return r;
}

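/*
 * Power up the IMGU hardware and resume its MMU. The IPU is asked to run at
 * a higher frequency when any enabled pipe has a large input resolution.
 */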
static int imgu_powerup(struct imgu_device *imgu)
{
        int r;
        unsigned int pipe;
        unsigned int freq = 200;
        struct v4l2_mbus_framefmt *fmt;

        /*
         * If the input is larger than 2048*1152, ask the IMGU to work at the
         * higher frequency.
         */
        for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
                fmt = &imgu->imgu_pipe[pipe].nodes[IMGU_NODE_IN].pad_fmt;
                dev_dbg(&imgu->pci_dev->dev, "pipe %u input format = %ux%u",
                        pipe, fmt->width, fmt->height);
                if ((fmt->width * fmt->height) >= (2048 * 1152))
                        freq = 450;
        }

        r = imgu_css_set_powerup(&imgu->pci_dev->dev, imgu->base, freq);
        if (r)
                return r;

        imgu_mmu_resume(imgu->mmu);
        return 0;
}

static void imgu_powerdown(struct imgu_device *imgu)
{
        imgu_mmu_suspend(imgu->mmu);
        imgu_css_set_powerdown(&imgu->pci_dev->dev, imgu->base);
}

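/*
 * Start or stop streaming: power the IMGU up or down, start or stop CSS
 * streaming and, on start, initialize dummy buffers and queue the initial
 * buffers for every enabled pipe.
 */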
int imgu_s_stream(struct imgu_device *imgu, int enable)
{
        struct device *dev = &imgu->pci_dev->dev;
        int r, pipe;

        if (!enable) {
                /* Stop streaming */
                dev_dbg(dev, "stream off\n");
                /* Block new buffers from being queued to CSS. */
                atomic_set(&imgu->qbuf_barrier, 1);
                imgu_css_stop_streaming(&imgu->css);
                synchronize_irq(imgu->pci_dev->irq);
                atomic_set(&imgu->qbuf_barrier, 0);
                imgu_powerdown(imgu);
                pm_runtime_put(&imgu->pci_dev->dev);

                return 0;
        }

        /* Set Power */
        r = pm_runtime_resume_and_get(dev);
        if (r < 0) {
                dev_err(dev, "failed to set imgu power\n");
                return r;
        }

        r = imgu_powerup(imgu);
        if (r) {
                dev_err(dev, "failed to power up imgu\n");
                pm_runtime_put(dev);
                return r;
        }

        /* Start CSS streaming */
        r = imgu_css_start_streaming(&imgu->css);
        if (r) {
                dev_err(dev, "failed to start css streaming (%d)", r);
                goto fail_start_streaming;
        }

        for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
                /* Initialize dummy buffers */
                r = imgu_dummybufs_init(imgu, pipe);
                if (r) {
                        dev_err(dev, "failed to initialize dummy buffers (%d)", r);
                        goto fail_dummybufs;
                }

                /* Queue as many buffers from the queue as possible */
                r = imgu_queue_buffers(imgu, true, pipe);
                if (r) {
                        dev_err(dev, "failed to queue initial buffers (%d)", r);
                        goto fail_queueing;
                }
        }

        return 0;
fail_queueing:
        for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
                imgu_dummybufs_cleanup(imgu, pipe);
fail_dummybufs:
        imgu_css_stop_streaming(&imgu->css);
fail_start_streaming:
        pm_runtime_put(dev);

        return r;
}

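/* Undo imgu_video_nodes_init(): free dummy buffers and unregister V4L2. */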
static void imgu_video_nodes_exit(struct imgu_device *imgu)
{
        int i;

        for (i = 0; i < IMGU_MAX_PIPE_NUM; i++)
                imgu_dummybufs_cleanup(imgu, i);

        imgu_v4l2_unregister(imgu);
}

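/*
 * Initialize the per-pipe video node defaults, register the V4L2 devices,
 * set the initial formats and pre-allocate the dummy buffers.
 */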
static int imgu_video_nodes_init(struct imgu_device *imgu)
{
        struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
        struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
        struct imgu_media_pipe *imgu_pipe;
        unsigned int i, j;
        int r;

        imgu->buf_struct_size = sizeof(struct imgu_buffer);

        for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
                imgu_pipe = &imgu->imgu_pipe[j];

                for (i = 0; i < IMGU_NODE_NUM; i++) {
                        imgu_pipe->nodes[i].name = imgu_node_map[i].name;
                        imgu_pipe->nodes[i].output = i < IMGU_QUEUE_FIRST_INPUT;
                        imgu_pipe->nodes[i].enabled = false;

                        if (i != IMGU_NODE_PARAMS && i != IMGU_NODE_STAT_3A)
                                fmts[imgu_node_map[i].css_queue] =
                                        &imgu_pipe->nodes[i].vdev_fmt.fmt.pix_mp;
                        atomic_set(&imgu_pipe->nodes[i].sequence, 0);
                }
        }

        r = imgu_v4l2_register(imgu);
        if (r)
                return r;

        /* Set initial formats and initialize formats of video nodes */
        for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
                imgu_pipe = &imgu->imgu_pipe[j];

                rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_pipe->imgu_sd.rect.eff;
                rects[IPU3_CSS_RECT_BDS] = &imgu_pipe->imgu_sd.rect.bds;
                imgu_css_fmt_set(&imgu->css, fmts, rects, j);

                /* Pre-allocate dummy buffers */
                r = imgu_dummybufs_preallocate(imgu, j);
                if (r) {
                        dev_err(&imgu->pci_dev->dev,
                                "failed to pre-allocate dummy buffers (%d)", r);
                        goto out_cleanup;
                }
        }

        return 0;

out_cleanup:
        imgu_video_nodes_exit(imgu);

        return r;
}

/**************** PCI interface ****************/

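/*
 * Threaded interrupt handler: dequeue completed buffers from CSS, complete
 * the corresponding vb2 buffers (skipping dummy buffers) and then try to
 * queue more buffers to CSS.
 */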
static irqreturn_t imgu_isr_threaded(int irq, void *imgu_ptr)
{
        struct imgu_device *imgu = imgu_ptr;
        struct imgu_media_pipe *imgu_pipe;
        int p;

        /* Dequeue / queue buffers */
        do {
                u64 ns = ktime_get_ns();
                struct imgu_css_buffer *b;
                struct imgu_buffer *buf = NULL;
                unsigned int node, pipe;
                bool dummy;

                do {
                        mutex_lock(&imgu->lock);
                        b = imgu_css_buf_dequeue(&imgu->css);
                        mutex_unlock(&imgu->lock);
                } while (PTR_ERR(b) == -EAGAIN);

                if (IS_ERR(b)) {
                        if (PTR_ERR(b) != -EBUSY)       /* All done */
                                dev_err(&imgu->pci_dev->dev,
                                        "failed to dequeue buffers (%ld)\n",
                                        PTR_ERR(b));
                        break;
                }

                node = imgu_map_node(imgu, b->queue);
                pipe = b->pipe;
                dummy = imgu_dummybufs_check(imgu, b, pipe);
                if (!dummy)
                        buf = container_of(b, struct imgu_buffer, css_buf);
                dev_dbg(&imgu->pci_dev->dev,
                        "dequeue %s %s buffer %d daddr 0x%x from css\n",
                        dummy ? "dummy" : "user",
                        imgu_node_map[node].name,
                        dummy ? 0 : buf->vid_buf.vbb.vb2_buf.index,
                        (u32)b->daddr);

                if (dummy)
                        /* It was a dummy buffer, skip it */
                        continue;

                /* Fill in the vb2 buffer entries and mark the buffer done */
                imgu_pipe = &imgu->imgu_pipe[pipe];
                if (!imgu_pipe->nodes[node].output) {
                        buf->vid_buf.vbb.vb2_buf.timestamp = ns;
                        buf->vid_buf.vbb.field = V4L2_FIELD_NONE;
                        buf->vid_buf.vbb.sequence =
                                atomic_inc_return(
                                &imgu_pipe->nodes[node].sequence);
                        dev_dbg(&imgu->pci_dev->dev, "vb2 buffer sequence %d",
                                buf->vid_buf.vbb.sequence);
                }
                imgu_buffer_done(imgu, &buf->vid_buf.vbb.vb2_buf,
                                 imgu_css_buf_state(&buf->css_buf) ==
                                                    IPU3_CSS_BUFFER_DONE ?
                                                    VB2_BUF_STATE_DONE :
                                                    VB2_BUF_STATE_ERROR);
                mutex_lock(&imgu->lock);
                if (imgu_css_queue_empty(&imgu->css))
                        wake_up_all(&imgu->buf_drain_wq);
                mutex_unlock(&imgu->lock);
        } while (1);

        /*
         * Try to queue more buffers to CSS. qbuf_barrier is used to prevent
         * new buffers from being queued to CSS.
         */
        if (!atomic_read(&imgu->qbuf_barrier))
                for_each_set_bit(p, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
                        imgu_queue_buffers(imgu, false, p);

        return IRQ_HANDLED;
}

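/* Hard IRQ handler: acknowledge the interrupt and wake the threaded handler. */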
static irqreturn_t imgu_isr(int irq, void *imgu_ptr)
{
        struct imgu_device *imgu = imgu_ptr;

        /* Acknowledge the interrupt */
        if (imgu_css_irq_ack(&imgu->css) < 0)
                return IRQ_NONE;

        return IRQ_WAKE_THREAD;
}

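/* Enable MSI and turn on memory access and bus mastering for the device. */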
static int imgu_pci_config_setup(struct pci_dev *dev)
{
        u16 pci_command;
        int r = pci_enable_msi(dev);

        if (r) {
                dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
                return r;
        }

        pci_read_config_word(dev, PCI_COMMAND, &pci_command);
        pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
                        PCI_COMMAND_INTX_DISABLE;
        pci_write_config_word(dev, PCI_COMMAND, pci_command);

        return 0;
}

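/*
 * Probe: map the PCI BAR, set up the DMA mask, power up CSS, initialize the
 * MMU, DMA mapping, CSS and V4L2 nodes, and finally request the IRQ.
 */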
static int imgu_pci_probe(struct pci_dev *pci_dev,
                          const struct pci_device_id *id)
{
        struct imgu_device *imgu;
        phys_addr_t phys;
        unsigned long phys_len;
        void __iomem *const *iomap;
        int r;

        imgu = devm_kzalloc(&pci_dev->dev, sizeof(*imgu), GFP_KERNEL);
        if (!imgu)
                return -ENOMEM;

        imgu->pci_dev = pci_dev;

        r = pcim_enable_device(pci_dev);
        if (r) {
                dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
                return r;
        }

        dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
                 pci_dev->device, pci_dev->revision);

        phys = pci_resource_start(pci_dev, IMGU_PCI_BAR);
        phys_len = pci_resource_len(pci_dev, IMGU_PCI_BAR);

        r = pcim_iomap_regions(pci_dev, 1 << IMGU_PCI_BAR, pci_name(pci_dev));
        if (r) {
                dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
                return r;
        }
        dev_info(&pci_dev->dev, "physical base address %pap, %lu bytes\n",
                 &phys, phys_len);

        iomap = pcim_iomap_table(pci_dev);
        if (!iomap) {
                dev_err(&pci_dev->dev, "failed to iomap table\n");
                return -ENODEV;
        }

        imgu->base = iomap[IMGU_PCI_BAR];

        pci_set_drvdata(pci_dev, imgu);

        pci_set_master(pci_dev);

        r = dma_coerce_mask_and_coherent(&pci_dev->dev, IMGU_DMA_MASK);
        if (r) {
                dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
                return -ENODEV;
        }

        r = imgu_pci_config_setup(pci_dev);
        if (r)
                return r;

        mutex_init(&imgu->lock);
        mutex_init(&imgu->streaming_lock);
        atomic_set(&imgu->qbuf_barrier, 0);
        init_waitqueue_head(&imgu->buf_drain_wq);

        r = imgu_css_set_powerup(&pci_dev->dev, imgu->base, 200);
        if (r) {
                dev_err(&pci_dev->dev,
                        "failed to power up CSS (%d)\n", r);
                goto out_mutex_destroy;
        }

        imgu->mmu = imgu_mmu_init(&pci_dev->dev, imgu->base);
        if (IS_ERR(imgu->mmu)) {
                r = PTR_ERR(imgu->mmu);
                dev_err(&pci_dev->dev, "failed to initialize MMU (%d)\n", r);
                goto out_css_powerdown;
        }

        r = imgu_dmamap_init(imgu);
        if (r) {
                dev_err(&pci_dev->dev,
                        "failed to initialize DMA mapping (%d)\n", r);
                goto out_mmu_exit;
        }

        /* ISP programming */
        r = imgu_css_init(&pci_dev->dev, &imgu->css, imgu->base, phys_len);
        if (r) {
                dev_err(&pci_dev->dev, "failed to initialize CSS (%d)\n", r);
                goto out_dmamap_exit;
        }

        /* v4l2 sub-device registration */
        r = imgu_video_nodes_init(imgu);
        if (r) {
                dev_err(&pci_dev->dev, "failed to create V4L2 devices (%d)\n",
                        r);
                goto out_css_cleanup;
        }

        r = devm_request_threaded_irq(&pci_dev->dev, pci_dev->irq,
                                      imgu_isr, imgu_isr_threaded,
                                      IRQF_SHARED, IMGU_NAME, imgu);
        if (r) {
                dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
                goto out_video_exit;
        }

        pm_runtime_put_noidle(&pci_dev->dev);
        pm_runtime_allow(&pci_dev->dev);

        return 0;

out_video_exit:
        imgu_video_nodes_exit(imgu);
out_css_cleanup:
        imgu_css_cleanup(&imgu->css);
out_dmamap_exit:
        imgu_dmamap_exit(imgu);
out_mmu_exit:
        imgu_mmu_exit(imgu->mmu);
out_css_powerdown:
        imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
out_mutex_destroy:
        mutex_destroy(&imgu->streaming_lock);
        mutex_destroy(&imgu->lock);

        return r;
}

static void imgu_pci_remove(struct pci_dev *pci_dev)
{
        struct imgu_device *imgu = pci_get_drvdata(pci_dev);

        pm_runtime_forbid(&pci_dev->dev);
        pm_runtime_get_noresume(&pci_dev->dev);

        imgu_video_nodes_exit(imgu);
        imgu_css_cleanup(&imgu->css);
        imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
        imgu_dmamap_exit(imgu);
        imgu_mmu_exit(imgu->mmu);
        mutex_destroy(&imgu->streaming_lock);
        mutex_destroy(&imgu->lock);
}

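/*
 * System suspend: if streaming, block further buffer queueing, wait for CSS
 * to drain its buffers, stop streaming and power the IMGU down.
 */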
static int __maybe_unused imgu_suspend(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct imgu_device *imgu = pci_get_drvdata(pci_dev);

        imgu->suspend_in_stream = imgu_css_is_streaming(&imgu->css);
        if (!imgu->suspend_in_stream)
                goto out;
        /* Block new buffers from being queued to CSS. */
        atomic_set(&imgu->qbuf_barrier, 1);
        /*
         * Wait for the currently running irq handler to finish so that no
         * new buffers will be queued to the FW later.
         */
        synchronize_irq(pci_dev->irq);
        /* Wait until all buffers in CSS are done. */
        if (!wait_event_timeout(imgu->buf_drain_wq,
            imgu_css_queue_empty(&imgu->css), msecs_to_jiffies(1000)))
                dev_err(dev, "wait buffer drain timeout.\n");

        imgu_css_stop_streaming(&imgu->css);
        atomic_set(&imgu->qbuf_barrier, 0);
        imgu_powerdown(imgu);
        pm_runtime_force_suspend(dev);
out:
        return 0;
}

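/*
 * System resume: if streaming was active at suspend time, power the IMGU
 * back up, restart CSS streaming and re-queue buffers to every enabled pipe.
 */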
static int __maybe_unused imgu_resume(struct device *dev)
{
        struct imgu_device *imgu = dev_get_drvdata(dev);
        int r = 0;
        unsigned int pipe;

        if (!imgu->suspend_in_stream)
                goto out;

        pm_runtime_force_resume(dev);

        r = imgu_powerup(imgu);
        if (r) {
                dev_err(dev, "failed to power up imgu\n");
                goto out;
        }

        /* Start CSS streaming */
        r = imgu_css_start_streaming(&imgu->css);
        if (r) {
                dev_err(dev, "failed to resume css streaming (%d)", r);
                goto out;
        }

        for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
                r = imgu_queue_buffers(imgu, true, pipe);
                if (r)
                        dev_err(dev, "failed to queue buffers to pipe %d (%d)",
                                pipe, r);
        }

out:
        return r;
}

/*
 * The PCI runtime PM framework checks for the existence of driver runtime PM
 * callbacks. Provide a dummy callback here to avoid runtime PM going into an
 * error state.
 */
static __maybe_unused int imgu_rpm_dummy_cb(struct device *dev)
{
        return 0;
}

static const struct dev_pm_ops imgu_pm_ops = {
        SET_RUNTIME_PM_OPS(&imgu_rpm_dummy_cb, &imgu_rpm_dummy_cb, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(&imgu_suspend, &imgu_resume)
};

static const struct pci_device_id imgu_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, IMGU_PCI_ID) },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, imgu_pci_tbl);

static struct pci_driver imgu_pci_driver = {
        .name = IMGU_NAME,
        .id_table = imgu_pci_tbl,
        .probe = imgu_pci_probe,
        .remove = imgu_pci_remove,
        .driver = {
                .pm = &imgu_pm_ops,
        },
};

module_pci_driver(imgu_pci_driver);

MODULE_AUTHOR("Tuukka Toivonen");
MODULE_AUTHOR("Tianshu Qiu <[email protected]>");
MODULE_AUTHOR("Jian Xu Zheng");
MODULE_AUTHOR("Yuning Pu");
MODULE_AUTHOR("Yong Zhi <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel ipu3_imgu PCI driver");
MODULE_FIRMWARE(IMGU_FW_NAME);
MODULE_FIRMWARE(IMGU_FW_NAME_20161208);
MODULE_FIRMWARE(IMGU_FW_NAME_IPU_20161208);