/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS                2000

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
        "TAHITI",
        "PITCAIRN",
        "VERDE",
        "OLAND",
        "HAINAN",
        "BONAIRE",
        "KAVERI",
        "KABINI",
        "HAWAII",
        "MULLINS",
        "TOPAZ",
        "TONGA",
        "FIJI",
        "CARRIZO",
        "STONEY",
        "POLARIS10",
        "POLARIS11",
        "POLARIS12",
        "VEGA10",
        "RAVEN",
        "LAST",
};

bool amdgpu_device_is_px(struct drm_device *dev)
{
        struct amdgpu_device *adev = dev->dev_private;

        if (adev->flags & AMD_IS_PX)
                return true;
        return false;
}

/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
                        uint32_t acc_flags)
{
        uint32_t ret;

        if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
                BUG_ON(in_interrupt());
                return amdgpu_virt_kiq_rreg(adev, reg);
        }

        if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
                ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
        else {
                unsigned long flags;

                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
                writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
                ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
                spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
        }
        trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
        return ret;
}

void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                    uint32_t acc_flags)
{
        trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

        if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
                adev->last_mm_index = v;
        }

        if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
                BUG_ON(in_interrupt());
                return amdgpu_virt_kiq_wreg(adev, reg, v);
        }

        if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        else {
                unsigned long flags;

                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
                writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
                writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
                spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
        }

        if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
                udelay(500);
        }
}
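
/*
 * Note on the two code paths above: registers whose byte offset falls
 * inside the mapped MMIO BAR are accessed directly with readl()/writel(),
 * while offsets beyond rmmio_size go through the mmMM_INDEX/mmMM_DATA
 * pair under mmio_idx_lock so the index/data sequence cannot be
 * interleaved by another CPU.
 */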

u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
        if ((reg * 4) < adev->rio_mem_size)
                return ioread32(adev->rio_mem + (reg * 4));
        else {
                iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
                return ioread32(adev->rio_mem + (mmMM_DATA * 4));
        }
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
                adev->last_mm_index = v;
        }

        if ((reg * 4) < adev->rio_mem_size)
                iowrite32(v, adev->rio_mem + (reg * 4));
        else {
                iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
                iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
        }

        if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
                udelay(500);
        }
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
        if (index < adev->doorbell.num_doorbells) {
                return readl(adev->doorbell.ptr + index);
        } else {
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
                return 0;
        }
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
        if (index < adev->doorbell.num_doorbells) {
                writel(v, adev->doorbell.ptr + index);
        } else {
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
        }
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
        if (index < adev->doorbell.num_doorbells) {
                return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
        } else {
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
                return 0;
        }
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
        if (index < adev->doorbell.num_doorbells) {
                atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
        } else {
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
        }
}
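
/*
 * Note: the qword helpers above go through atomic64_read()/atomic64_set()
 * so the doorbell is accessed as a single 64-bit transaction rather than
 * two 32-bit ones; splitting the access would risk the hardware sampling
 * a half-updated doorbell value.
 */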

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
        BUG();
        return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
                  reg, v);
        BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
                                          uint32_t block, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
                  reg, block);
        BUG();
        return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
                                      uint32_t block,
                                      uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
                  reg, block, v);
        BUG();
}

static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
        return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
                                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
                                       &adev->vram_scratch.robj,
                                       &adev->vram_scratch.gpu_addr,
                                       (void **)&adev->vram_scratch.ptr);
}

static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
                                      const u32 *registers,
                                      const u32 array_size)
{
        u32 tmp, reg, and_mask, or_mask;
        int i;

        if (array_size % 3)
                return;

        for (i = 0; i < array_size; i += 3) {
                reg = registers[i + 0];
                and_mask = registers[i + 1];
                or_mask = registers[i + 2];

                if (and_mask == 0xffffffff) {
                        tmp = or_mask;
                } else {
                        tmp = RREG32(reg);
                        tmp &= ~and_mask;
                        tmp |= or_mask;
                }
                WREG32(reg, tmp);
        }
}
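
/*
 * Illustrative use (register name and values are hypothetical): the array
 * is a sequence of {reg, and_mask, or_mask} triples. Bits set in and_mask
 * are cleared, or_mask is then OR'ed in, and an and_mask of 0xffffffff
 * short-circuits to a plain write of or_mask:
 *
 *   static const u32 example_golden[] = {
 *           mmFOO_CONTROL, 0x0000ffff, 0x00001234,
 *   };
 *   amdgpu_program_register_sequence(adev, example_golden,
 *                                    ARRAY_SIZE(example_golden));
 */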

void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
        pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
        /* No doorbell on SI hardware generation */
        if (adev->asic_type < CHIP_BONAIRE) {
                adev->doorbell.base = 0;
                adev->doorbell.size = 0;
                adev->doorbell.num_doorbells = 0;
                adev->doorbell.ptr = NULL;
                return 0;
        }

        /* doorbell bar mapping */
        adev->doorbell.base = pci_resource_start(adev->pdev, 2);
        adev->doorbell.size = pci_resource_len(adev->pdev, 2);

        adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
                                             AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
        if (adev->doorbell.num_doorbells == 0)
                return -EINVAL;

        adev->doorbell.ptr = ioremap(adev->doorbell.base,
                                     adev->doorbell.num_doorbells *
                                     sizeof(u32));
        if (adev->doorbell.ptr == NULL)
                return -ENOMEM;

        return 0;
}

/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
        iounmap(adev->doorbell.ptr);
        adev->doorbell.ptr = NULL;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
                                phys_addr_t *aperture_base,
                                size_t *aperture_size,
                                size_t *start_offset)
{
        /*
         * The first num_doorbells are used by amdgpu.
         * amdkfd takes whatever's left in the aperture.
         */
        if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
                *aperture_base = adev->doorbell.base;
                *aperture_size = adev->doorbell.size;
                *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
        } else {
                *aperture_base = 0;
                *aperture_size = 0;
                *start_offset = 0;
        }
}

/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
        if (adev->wb.wb_obj) {
                amdgpu_bo_free_kernel(&adev->wb.wb_obj,
                                      &adev->wb.gpu_addr,
                                      (void **)&adev->wb.wb);
                adev->wb.wb_obj = NULL;
        }
}

/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_wb_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->wb.wb_obj == NULL) {
                /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
                r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
                                            PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                            &adev->wb.wb_obj, &adev->wb.gpu_addr,
                                            (void **)&adev->wb.wb);
                if (r) {
                        dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
                        return r;
                }

                adev->wb.num_wb = AMDGPU_MAX_WB;
                memset(&adev->wb.used, 0, sizeof(adev->wb.used));

                /* clear wb memory */
                memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
        }

        return 0;
}
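
/*
 * Sizing note: the buffer above holds AMDGPU_MAX_WB slots of 8 dwords
 * (256 bits) each; amdgpu_wb_get() below therefore converts a slot index
 * into a dword offset by multiplying by 8.
 */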

/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
        unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

        if (offset < adev->wb.num_wb) {
                __set_bit(offset, adev->wb.used);
                *wb = offset * 8; /* convert to dw offset */
                return 0;
        } else {
                return -EINVAL;
        }
}

/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
        if (wb < adev->wb.num_wb)
                __clear_bit(wb, adev->wb.used);
}
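
/*
 * Illustrative use of the wb helpers (error handling elided; the local
 * names are examples, not driver API):
 *
 *   u32 wb;
 *
 *   if (!amdgpu_wb_get(adev, &wb)) {
 *           u32 val = adev->wb.wb[wb];               (CPU-side read)
 *           u64 gpu_va = adev->wb.gpu_addr + wb * 4; (address for the GPU)
 *           amdgpu_wb_free(adev, wb);
 *   }
 */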

/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or,
 * for IGPs, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than the aperture
 * size (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hardware of Novell bug 204882 along with
 * lots of Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
        uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

        mc->vram_start = base;
        if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
                dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
                mc->real_vram_size = mc->aper_size;
                mc->mc_vram_size = mc->aper_size;
        }
        mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
        if (limit && limit < mc->real_vram_size)
                mc->real_vram_size = limit;
        dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
                        mc->mc_vram_size >> 20, mc->vram_start,
                        mc->vram_end, mc->real_vram_size >> 20);
}
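
/*
 * Worked example (illustrative numbers): with base = 0 and an 8 GB
 * mc_vram_size, vram_start = 0x0 and vram_end = 0x1FFFFFFFF; a module
 * parameter of amdgpu_vram_limit = 4096 (megabytes) would then clamp
 * real_vram_size to 4 GB without moving the vram_start/vram_end window.
 */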

/**
 * amdgpu_gart_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
        u64 size_af, size_bf;

        size_af = adev->mc.mc_mask - mc->vram_end;
        size_bf = mc->vram_start;
        if (size_bf > size_af) {
                if (mc->gart_size > size_bf) {
                        dev_warn(adev->dev, "limiting GTT\n");
                        mc->gart_size = size_bf;
                }
                mc->gart_start = 0;
        } else {
                if (mc->gart_size > size_af) {
                        dev_warn(adev->dev, "limiting GTT\n");
                        mc->gart_size = size_af;
                }
                mc->gart_start = mc->vram_end + 1;
        }
        mc->gart_end = mc->gart_start + mc->gart_size - 1;
        dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
                        mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
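
/*
 * Placement sketch: VRAM splits the MC address space into a gap below it
 * (size_bf) and a gap above it up to mc_mask (size_af); the GART lands in
 * whichever gap is larger and is shrunk to fit if needed. For example,
 * VRAM at [0x0, 0x1FFFFFFFF] on a 40-bit MC leaves no gap below, so the
 * GART starts at 0x200000000.
 */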

/*
 * GPU helper functions.
 */
/**
 * amdgpu_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if post is needed because a hw reset was performed.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_need_post(struct amdgpu_device *adev)
{
        uint32_t reg;

        if (adev->has_hw_reset) {
                adev->has_hw_reset = false;
                return true;
        }

        /* bios scratch used on CIK+ */
        if (adev->asic_type >= CHIP_BONAIRE)
                return amdgpu_atombios_scratch_need_asic_init(adev);

        /* check MEM_SIZE for older asics */
        reg = amdgpu_asic_get_config_memsize(adev);

        if ((reg != 0) && (reg != 0xffffffff))
                return false;

        return true;
}

static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
{
        if (amdgpu_sriov_vf(adev))
                return false;

        if (amdgpu_passthrough(adev)) {
                /* For FIJI: in the whole-GPU pass-through virtualization case,
                 * after a VM reboot some old SMC firmware still needs the driver
                 * to perform vPost, otherwise the GPU hangs. SMC firmware above
                 * version 22.15 doesn't have this flaw, so we force vPost for
                 * SMC versions below that.
                 */
                if (adev->asic_type == CHIP_FIJI) {
                        int err;
                        uint32_t fw_ver;
                        err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
                        /* force vPost if an error occurred */
                        if (err)
                                return true;

                        fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
                        if (fw_ver < 0x00160e00)
                                return true;
                }
        }
        return amdgpu_need_post(adev);
}

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
        if (adev->dummy_page.page)
                return 0;
        adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
        if (adev->dummy_page.page == NULL)
                return -ENOMEM;
        adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
                                        0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
                dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
                __free_page(adev->dummy_page.page);
                adev->dummy_page.page = NULL;
                return -ENOMEM;
        }
        return 0;
}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
        if (adev->dummy_page.page == NULL)
                return;
        pci_unmap_page(adev->pdev, adev->dummy_page.addr,
                        PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        __free_page(adev->dummy_page.page);
        adev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios.  The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.).  See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
        return 0;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
        return 0;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the MMIO register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct amdgpu_device *adev = info->dev->dev_private;

        WREG32(reg, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
        struct amdgpu_device *adev = info->dev->dev_private;
        uint32_t r;

        r = RREG32(reg);
        return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the IO register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct amdgpu_device *adev = info->dev->dev_private;

        WREG32_IO(reg, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
        struct amdgpu_device *adev = info->dev->dev_private;
        uint32_t r;

        r = RREG32_IO(reg);
        return r;
}

static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
                                                 struct device_attribute *attr,
                                                 char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        struct atom_context *ctx = adev->mode_info.atom_context;

        return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
}

static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
                   NULL);

/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
        if (adev->mode_info.atom_context) {
                kfree(adev->mode_info.atom_context->scratch);
                kfree(adev->mode_info.atom_context->iio);
        }
        kfree(adev->mode_info.atom_context);
        adev->mode_info.atom_context = NULL;
        kfree(adev->mode_info.atom_card_info);
        adev->mode_info.atom_card_info = NULL;
        device_remove_file(adev->dev, &dev_attr_vbios_version);
}

/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
        struct card_info *atom_card_info =
            kzalloc(sizeof(struct card_info), GFP_KERNEL);
        int ret;

        if (!atom_card_info)
                return -ENOMEM;

        adev->mode_info.atom_card_info = atom_card_info;
        atom_card_info->dev = adev->ddev;
        atom_card_info->reg_read = cail_reg_read;
        atom_card_info->reg_write = cail_reg_write;
        /* needed for iio ops */
        if (adev->rio_mem) {
                atom_card_info->ioreg_read = cail_ioreg_read;
                atom_card_info->ioreg_write = cail_ioreg_write;
        } else {
                DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
                atom_card_info->ioreg_read = cail_reg_read;
                atom_card_info->ioreg_write = cail_reg_write;
        }
        atom_card_info->mc_read = cail_mc_read;
        atom_card_info->mc_write = cail_mc_write;
        atom_card_info->pll_read = cail_pll_read;
        atom_card_info->pll_write = cail_pll_write;

        adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
        if (!adev->mode_info.atom_context) {
                amdgpu_atombios_fini(adev);
                return -ENOMEM;
        }

        mutex_init(&adev->mode_info.atom_context->mutex);
        if (adev->is_atom_fw) {
                amdgpu_atomfirmware_scratch_regs_init(adev);
                amdgpu_atomfirmware_allocate_fb_scratch(adev);
        } else {
                amdgpu_atombios_scratch_regs_init(adev);
                amdgpu_atombios_allocate_fb_scratch(adev);
        }

        ret = device_create_file(adev->dev, &dev_attr_vbios_version);
        if (ret) {
                DRM_ERROR("Failed to create device file for VBIOS version\n");
                return ret;
        }

        return 0;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
        struct amdgpu_device *adev = cookie;
        amdgpu_asic_set_vga_state(adev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void amdgpu_check_block_size(struct amdgpu_device *adev)
{
        /* defines number of bits in page table versus page directory,
         * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
         * page table and the remaining bits are in the page directory */
        if (amdgpu_vm_block_size == -1)
                return;

        if (amdgpu_vm_block_size < 9) {
                dev_warn(adev->dev, "VM page table size (%d) too small\n",
                         amdgpu_vm_block_size);
                goto def_value;
        }

        if (amdgpu_vm_block_size > 24 ||
            (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
                dev_warn(adev->dev, "VM page table size (%d) too large\n",
                         amdgpu_vm_block_size);
                goto def_value;
        }

        return;

def_value:
        amdgpu_vm_block_size = -1;
}
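
/*
 * Worked example for the check above: with 4 KB pages (12 offset bits),
 * amdgpu_vm_block_size = 9 gives 2^9 page-table entries per directory
 * entry, so each page-directory entry covers 2^(9+12) bytes = 2 MB of
 * address space; the remaining VM address bits index the page directory.
 */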

static void amdgpu_check_vm_size(struct amdgpu_device *adev)
{
        /* no need to check the default value */
        if (amdgpu_vm_size == -1)
                return;

        if (!is_power_of_2(amdgpu_vm_size)) {
                dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
                         amdgpu_vm_size);
                goto def_value;
        }

        if (amdgpu_vm_size < 1) {
                dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
                         amdgpu_vm_size);
                goto def_value;
        }

        /*
         * Max GPUVM size for Cayman, SI, CI and VI is 40 bits.
         */
        if (amdgpu_vm_size > 1024) {
                dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
                         amdgpu_vm_size);
                goto def_value;
        }

        return;

def_value:
        amdgpu_vm_size = -1;
}

/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
        if (amdgpu_sched_jobs < 4) {
                dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = 4;
        } else if (!is_power_of_2(amdgpu_sched_jobs)) {
                dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
        }

        if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
                /* gart size must be greater or equal to 32M */
                dev_warn(adev->dev, "gart size (%d) too small\n",
                         amdgpu_gart_size);
                amdgpu_gart_size = -1;
        }

        if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
                /* gtt size must be greater or equal to 32M */
                dev_warn(adev->dev, "gtt size (%d) too small\n",
                         amdgpu_gtt_size);
                amdgpu_gtt_size = -1;
        }

        /* valid range is between 4 and 9 inclusive */
        if (amdgpu_vm_fragment_size != -1 &&
            (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
                dev_warn(adev->dev, "valid range is between 4 and 9\n");
                amdgpu_vm_fragment_size = -1;
        }

        amdgpu_check_vm_size(adev);

        amdgpu_check_block_size(adev);

        if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
            !is_power_of_2(amdgpu_vram_page_split))) {
                dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
                         amdgpu_vram_page_split);
                amdgpu_vram_page_split = 1024;
        }
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver.  Suspends or resumes
 * the asics before or after they are powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
                return;

        if (state == VGA_SWITCHEROO_ON) {
                pr_info("amdgpu: switched on\n");
                /* don't suspend or resume card normally */
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

                amdgpu_device_resume(dev, true, true);

                dev->switch_power_state = DRM_SWITCH_POWER_ON;
                drm_kms_helper_poll_enable(dev);
        } else {
                pr_info("amdgpu: switched off\n");
                drm_kms_helper_poll_disable(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                amdgpu_device_suspend(dev, true, true);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver.  Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        /*
        * FIXME: open_count is protected by drm_global_mutex but that would lead to
        * locking inversion with the driver load path. And the access here is
        * completely racy anyway. So don't bother with locking for now.
        */
        return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
        .set_gpu_state = amdgpu_switcheroo_set_state,
        .reprobe = NULL,
        .can_switch = amdgpu_switcheroo_can_switch,
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
                                  enum amd_ip_block_type block_type,
                                  enum amd_clockgating_state state)
{
        int i, r = 0;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->type != block_type)
                        continue;
                if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
                        continue;
                r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
                        (void *)adev, state);
                if (r)
                        DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
                                  adev->ip_blocks[i].version->funcs->name, r);
        }
        return r;
}

int amdgpu_set_powergating_state(struct amdgpu_device *adev,
                                  enum amd_ip_block_type block_type,
                                  enum amd_powergating_state state)
{
        int i, r = 0;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->type != block_type)
                        continue;
                if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
                        continue;
                r = adev->ip_blocks[i].version->funcs->set_powergating_state(
                        (void *)adev, state);
                if (r)
                        DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
                                  adev->ip_blocks[i].version->funcs->name, r);
        }
        return r;
}

void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
{
        int i;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
                        adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
        }
}

int amdgpu_wait_for_idle(struct amdgpu_device *adev,
                         enum amd_ip_block_type block_type)
{
        int i, r;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->type == block_type) {
                        r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
                        if (r)
                                return r;
                        break;
                }
        }
        return 0;
}

bool amdgpu_is_idle(struct amdgpu_device *adev,
                    enum amd_ip_block_type block_type)
{
        int i;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->type == block_type)
                        return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
        }
        return true;
}

struct amdgpu_ip_block *amdgpu_get_ip_block(struct amdgpu_device *adev,
                                            enum amd_ip_block_type type)
{
        int i;

        for (i = 0; i < adev->num_ip_blocks; i++)
                if (adev->ip_blocks[i].version->type == type)
                        return &adev->ip_blocks[i];

        return NULL;
}

/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block's version is equal or greater,
 * 1 if it is smaller or the ip_block doesn't exist.
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
                                enum amd_ip_block_type type,
                                u32 major, u32 minor)
{
        struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);

        if (ip_block && ((ip_block->version->major > major) ||
                        ((ip_block->version->major == major) &&
                        (ip_block->version->minor >= minor))))
                return 0;

        return 1;
}
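
/*
 * Illustrative check (the block type is just an example): a return value
 * of 0 from
 *
 *   amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 8, 1)
 *
 * means the asic carries a GFX IP block at version 8.1 or newer.
 */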
1319
1320 /**
1321  * amdgpu_ip_block_add
1322  *
1323  * @adev: amdgpu_device pointer
1324  * @ip_block_version: pointer to the IP to add
1325  *
1326  * Adds the IP block driver information to the collection of IPs
1327  * on the asic.
1328  */
1329 int amdgpu_ip_block_add(struct amdgpu_device *adev,
1330                         const struct amdgpu_ip_block_version *ip_block_version)
1331 {
1332         if (!ip_block_version)
1333                 return -EINVAL;
1334
1335         DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
1336                   ip_block_version->funcs->name);
1337
1338         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1339
1340         return 0;
1341 }
1342
1343 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1344 {
1345         adev->enable_virtual_display = false;
1346
1347         if (amdgpu_virtual_display) {
1348                 struct drm_device *ddev = adev->ddev;
1349                 const char *pci_address_name = pci_name(ddev->pdev);
1350                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1351
1352                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1353                 pciaddstr_tmp = pciaddstr;
1354                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1355                         pciaddname = strsep(&pciaddname_tmp, ",");
1356                         if (!strcmp("all", pciaddname)
1357                             || !strcmp(pci_address_name, pciaddname)) {
1358                                 long num_crtc;
1359                                 int res = -1;
1360
1361                                 adev->enable_virtual_display = true;
1362
1363                                 if (pciaddname_tmp)
1364                                         res = kstrtol(pciaddname_tmp, 10,
1365                                                       &num_crtc);
1366
1367                                 if (!res) {
1368                                         if (num_crtc < 1)
1369                                                 num_crtc = 1;
1370                                         if (num_crtc > 6)
1371                                                 num_crtc = 6;
1372                                         adev->mode_info.num_crtc = num_crtc;
1373                                 } else {
1374                                         adev->mode_info.num_crtc = 1;
1375                                 }
1376                                 break;
1377                         }
1378                 }
1379
1380                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1381                          amdgpu_virtual_display, pci_address_name,
1382                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1383
1384                 kfree(pciaddstr);
1385         }
1386 }
1387
1388 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1389 {
1390         const char *chip_name;
1391         char fw_name[30];
1392         int err;
1393         const struct gpu_info_firmware_header_v1_0 *hdr;
1394
1395         adev->firmware.gpu_info_fw = NULL;
1396
1397         switch (adev->asic_type) {
1398         case CHIP_TOPAZ:
1399         case CHIP_TONGA:
1400         case CHIP_FIJI:
1401         case CHIP_POLARIS11:
1402         case CHIP_POLARIS10:
1403         case CHIP_POLARIS12:
1404         case CHIP_CARRIZO:
1405         case CHIP_STONEY:
1406 #ifdef CONFIG_DRM_AMDGPU_SI
1407         case CHIP_VERDE:
1408         case CHIP_TAHITI:
1409         case CHIP_PITCAIRN:
1410         case CHIP_OLAND:
1411         case CHIP_HAINAN:
1412 #endif
1413 #ifdef CONFIG_DRM_AMDGPU_CIK
1414         case CHIP_BONAIRE:
1415         case CHIP_HAWAII:
1416         case CHIP_KAVERI:
1417         case CHIP_KABINI:
1418         case CHIP_MULLINS:
1419 #endif
1420         default:
1421                 return 0;
1422         case CHIP_VEGA10:
1423                 chip_name = "vega10";
1424                 break;
1425         case CHIP_RAVEN:
1426                 chip_name = "raven";
1427                 break;
1428         }
1429
1430         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1431         err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1432         if (err) {
1433                 dev_err(adev->dev,
1434                         "Failed to load gpu_info firmware \"%s\"\n",
1435                         fw_name);
1436                 goto out;
1437         }
1438         err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1439         if (err) {
1440                 dev_err(adev->dev,
1441                         "Failed to validate gpu_info firmware \"%s\"\n",
1442                         fw_name);
1443                 goto out;
1444         }
1445
1446         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1447         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1448
1449         switch (hdr->version_major) {
1450         case 1:
1451         {
1452                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1453                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1454                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1455
1456                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1457                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1458                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1459                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1460                 adev->gfx.config.max_texture_channel_caches =
1461                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
1462                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1463                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1464                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1465                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1466                 adev->gfx.config.double_offchip_lds_buf =
1467                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1468                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1469                 adev->gfx.cu_info.max_waves_per_simd =
1470                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1471                 adev->gfx.cu_info.max_scratch_slots_per_cu =
1472                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1473                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1474                 break;
1475         }
1476         default:
1477                 dev_err(adev->dev,
1478                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1479                 err = -EINVAL;
1480                 goto out;
1481         }
1482 out:
1483         return err;
1484 }
1485
1486 static int amdgpu_early_init(struct amdgpu_device *adev)
1487 {
1488         int i, r;
1489
1490         amdgpu_device_enable_virtual_display(adev);
1491
1492         switch (adev->asic_type) {
1493         case CHIP_TOPAZ:
1494         case CHIP_TONGA:
1495         case CHIP_FIJI:
1496         case CHIP_POLARIS11:
1497         case CHIP_POLARIS10:
1498         case CHIP_POLARIS12:
1499         case CHIP_CARRIZO:
1500         case CHIP_STONEY:
1501                 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
1502                         adev->family = AMDGPU_FAMILY_CZ;
1503                 else
1504                         adev->family = AMDGPU_FAMILY_VI;
1505
1506                 r = vi_set_ip_blocks(adev);
1507                 if (r)
1508                         return r;
1509                 break;
1510 #ifdef CONFIG_DRM_AMDGPU_SI
1511         case CHIP_VERDE:
1512         case CHIP_TAHITI:
1513         case CHIP_PITCAIRN:
1514         case CHIP_OLAND:
1515         case CHIP_HAINAN:
1516                 adev->family = AMDGPU_FAMILY_SI;
1517                 r = si_set_ip_blocks(adev);
1518                 if (r)
1519                         return r;
1520                 break;
1521 #endif
1522 #ifdef CONFIG_DRM_AMDGPU_CIK
1523         case CHIP_BONAIRE:
1524         case CHIP_HAWAII:
1525         case CHIP_KAVERI:
1526         case CHIP_KABINI:
1527         case CHIP_MULLINS:
1528                 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1529                         adev->family = AMDGPU_FAMILY_CI;
1530                 else
1531                         adev->family = AMDGPU_FAMILY_KV;
1532
1533                 r = cik_set_ip_blocks(adev);
1534                 if (r)
1535                         return r;
1536                 break;
1537 #endif
1538         case  CHIP_VEGA10:
1539         case  CHIP_RAVEN:
1540                 if (adev->asic_type == CHIP_RAVEN)
1541                         adev->family = AMDGPU_FAMILY_RV;
1542                 else
1543                         adev->family = AMDGPU_FAMILY_AI;
1544
1545                 r = soc15_set_ip_blocks(adev);
1546                 if (r)
1547                         return r;
1548                 break;
1549         default:
1550                 /* FIXME: not supported yet */
1551                 return -EINVAL;
1552         }
1553
1554         r = amdgpu_device_parse_gpu_info_fw(adev);
1555         if (r)
1556                 return r;
1557
1558         if (amdgpu_sriov_vf(adev)) {
1559                 r = amdgpu_virt_request_full_gpu(adev, true);
1560                 if (r)
1561                         return r;
1562         }
1563
1564         for (i = 0; i < adev->num_ip_blocks; i++) {
1565                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1566                         DRM_ERROR("disabled ip block: %d <%s>\n",
1567                                   i, adev->ip_blocks[i].version->funcs->name);
1568                         adev->ip_blocks[i].status.valid = false;
1569                 } else {
1570                         if (adev->ip_blocks[i].version->funcs->early_init) {
1571                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
1572                                 if (r == -ENOENT) {
1573                                         adev->ip_blocks[i].status.valid = false;
1574                                 } else if (r) {
1575                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
1576                                                   adev->ip_blocks[i].version->funcs->name, r);
1577                                         return r;
1578                                 } else {
1579                                         adev->ip_blocks[i].status.valid = true;
1580                                 }
1581                         } else {
1582                                 adev->ip_blocks[i].status.valid = true;
1583                         }
1584                 }
1585         }
1586
1587         adev->cg_flags &= amdgpu_cg_mask;
1588         adev->pg_flags &= amdgpu_pg_mask;
1589
1590         return 0;
1591 }
1592
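/*
 * Editorial note: the amdgpu_ip_block_mask tested in amdgpu_early_init()
 * comes from the ip_block_mask module parameter; a cleared bit i marks
 * adev->ip_blocks[i] invalid so none of its init/fini hooks run.
 * Assumed usage sketch (debugging aid only):
 *
 *	modprobe amdgpu ip_block_mask=0xffffffef	# disable block 4 only
 */
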
1593 static int amdgpu_init(struct amdgpu_device *adev)
1594 {
1595         int i, r;
1596
1597         for (i = 0; i < adev->num_ip_blocks; i++) {
1598                 if (!adev->ip_blocks[i].status.valid)
1599                         continue;
1600                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
1601                 if (r) {
1602                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1603                                   adev->ip_blocks[i].version->funcs->name, r);
1604                         return r;
1605                 }
1606                 adev->ip_blocks[i].status.sw = true;
1607
1608                 /* need to do gmc hw init early so we can allocate gpu mem */
1609                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1610                         r = amdgpu_vram_scratch_init(adev);
1611                         if (r) {
1612                                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
1613                                 return r;
1614                         }
1615                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1616                         if (r) {
1617                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
1618                                 return r;
1619                         }
1620                         r = amdgpu_wb_init(adev);
1621                         if (r) {
1622                                 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
1623                                 return r;
1624                         }
1625                         adev->ip_blocks[i].status.hw = true;
1626
1627                         /* right after GMC hw init, we create CSA */
1628                         if (amdgpu_sriov_vf(adev)) {
1629                                 r = amdgpu_allocate_static_csa(adev);
1630                                 if (r) {
1631                                         DRM_ERROR("allocate CSA failed %d\n", r);
1632                                         return r;
1633                                 }
1634                         }
1635                 }
1636         }
1637
1638         mutex_lock(&adev->firmware.mutex);
1639         if (amdgpu_ucode_init_bo(adev))
1640                 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
1641         mutex_unlock(&adev->firmware.mutex);
1642
1643         for (i = 0; i < adev->num_ip_blocks; i++) {
1644                 if (!adev->ip_blocks[i].status.sw)
1645                         continue;
1646                 /* gmc hw init is done early */
1647                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
1648                         continue;
1649                 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1650                 if (r) {
1651                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1652                                   adev->ip_blocks[i].version->funcs->name, r);
1653                         return r;
1654                 }
1655                 adev->ip_blocks[i].status.hw = true;
1656         }
1657
1658         return 0;
1659 }
1660
1661 static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
1662 {
1663         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1664 }
1665
1666 static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
1667 {
1668         return !!memcmp(adev->gart.ptr, adev->reset_magic,
1669                         AMDGPU_RESET_MAGIC_NUM);
1670 }
1671
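/*
 * Editorial note: the two helpers above implement VRAM-loss detection.
 * amdgpu_fill_reset_magic() snapshots the first AMDGPU_RESET_MAGIC_NUM
 * bytes of the GART table (which is allocated in VRAM), and
 * amdgpu_check_vram_lost() compares that snapshot after a reset; any
 * mismatch means VRAM contents were lost.  Sketch of the flow used by
 * amdgpu_gpu_reset() below:
 *
 *	amdgpu_fill_reset_magic(adev);		(at late init)
 *	... amdgpu_asic_reset(adev) ...
 *	if (amdgpu_check_vram_lost(adev))	(after resume phase 1)
 *		atomic_inc(&adev->vram_lost_counter);
 */
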
1672 static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
1673 {
1674         int i = 0, r;
1675
1676         for (i = 0; i < adev->num_ip_blocks; i++) {
1677                 if (!adev->ip_blocks[i].status.valid)
1678                         continue;
1679                 /* skip CG for VCE/UVD, it's handled specially */
1680                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1681                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1682                         /* enable clockgating to save power */
1683                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1684                                                                                      AMD_CG_STATE_GATE);
1685                         if (r) {
1686                                 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
1687                                           adev->ip_blocks[i].version->funcs->name, r);
1688                                 return r;
1689                         }
1690                 }
1691         }
1692         return 0;
1693 }
1694
1695 static int amdgpu_late_init(struct amdgpu_device *adev)
1696 {
1697         int i = 0, r;
1698
1699         for (i = 0; i < adev->num_ip_blocks; i++) {
1700                 if (!adev->ip_blocks[i].status.valid)
1701                         continue;
1702                 if (adev->ip_blocks[i].version->funcs->late_init) {
1703                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1704                         if (r) {
1705                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1706                                           adev->ip_blocks[i].version->funcs->name, r);
1707                                 return r;
1708                         }
1709                         adev->ip_blocks[i].status.late_initialized = true;
1710                 }
1711         }
1712
1713         mod_delayed_work(system_wq, &adev->late_init_work,
1714                         msecs_to_jiffies(AMDGPU_RESUME_MS));
1715
1716         amdgpu_fill_reset_magic(adev);
1717
1718         return 0;
1719 }
1720
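/*
 * Editorial note: amdgpu_late_init() deliberately does not enable clock
 * gating inline; it only (re)arms late_init_work, whose handler below
 * calls amdgpu_late_set_cg_state() AMDGPU_RESUME_MS (2000 ms) later, so
 * gating setup stays off the boot/resume critical path.
 */
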
1721 static int amdgpu_fini(struct amdgpu_device *adev)
1722 {
1723         int i, r;
1724
1725         /* need to disable SMC first */
1726         for (i = 0; i < adev->num_ip_blocks; i++) {
1727                 if (!adev->ip_blocks[i].status.hw)
1728                         continue;
1729                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
1730                         /* ungate blocks before hw fini so that we can shut down the blocks safely */
1731                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1732                                                                                      AMD_CG_STATE_UNGATE);
1733                         if (r) {
1734                                 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1735                                           adev->ip_blocks[i].version->funcs->name, r);
1736                                 return r;
1737                         }
1738                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1739                         /* XXX handle errors */
1740                         if (r) {
1741                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1742                                           adev->ip_blocks[i].version->funcs->name, r);
1743                         }
1744                         adev->ip_blocks[i].status.hw = false;
1745                         break;
1746                 }
1747         }
1748
1749         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1750                 if (!adev->ip_blocks[i].status.hw)
1751                         continue;
1752                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1753                         amdgpu_wb_fini(adev);
1754                         amdgpu_vram_scratch_fini(adev);
1755                 }
1756
1757                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1758                         adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1759                         /* ungate blocks before hw fini so that we can shut down the blocks safely */
1760                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1761                                                                                      AMD_CG_STATE_UNGATE);
1762                         if (r) {
1763                                 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1764                                           adev->ip_blocks[i].version->funcs->name, r);
1765                                 return r;
1766                         }
1767                 }
1768
1769                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1770                 /* XXX handle errors */
1771                 if (r) {
1772                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1773                                   adev->ip_blocks[i].version->funcs->name, r);
1774                 }
1775
1776                 adev->ip_blocks[i].status.hw = false;
1777         }
1778         if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT)
1779                 amdgpu_ucode_fini_bo(adev);
1780
1781         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1782                 if (!adev->ip_blocks[i].status.sw)
1783                         continue;
1784                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
1785                 /* XXX handle errors */
1786                 if (r) {
1787                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1788                                   adev->ip_blocks[i].version->funcs->name, r);
1789                 }
1790                 adev->ip_blocks[i].status.sw = false;
1791                 adev->ip_blocks[i].status.valid = false;
1792         }
1793
1794         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1795                 if (!adev->ip_blocks[i].status.late_initialized)
1796                         continue;
1797                 if (adev->ip_blocks[i].version->funcs->late_fini)
1798                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1799                 adev->ip_blocks[i].status.late_initialized = false;
1800         }
1801
1802         if (amdgpu_sriov_vf(adev))
1803                 amdgpu_virt_release_full_gpu(adev, false);
1804
1805         return 0;
1806 }
1807
1808 static void amdgpu_late_init_func_handler(struct work_struct *work)
1809 {
1810         struct amdgpu_device *adev =
1811                 container_of(work, struct amdgpu_device, late_init_work.work);
1812         amdgpu_late_set_cg_state(adev);
1813 }
1814
1815 int amdgpu_suspend(struct amdgpu_device *adev)
1816 {
1817         int i, r;
1818
1819         if (amdgpu_sriov_vf(adev))
1820                 amdgpu_virt_request_full_gpu(adev, false);
1821
1822         /* ungate SMC block first */
1823         r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1824                                          AMD_CG_STATE_UNGATE);
1825         if (r) {
1826                 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
1827         }
1828
1829         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1830                 if (!adev->ip_blocks[i].status.valid)
1831                         continue;
1832                 /* ungate blocks so that suspend can properly shut them down */
1833                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
1834                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1835                                                                                      AMD_CG_STATE_UNGATE);
1836                         if (r) {
1837                                 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1838                                           adev->ip_blocks[i].version->funcs->name, r);
1839                         }
1840                 }
1842                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
1843                 /* XXX handle errors */
1844                 if (r) {
1845                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
1846                                   adev->ip_blocks[i].version->funcs->name, r);
1847                 }
1848         }
1849
1850         if (amdgpu_sriov_vf(adev))
1851                 amdgpu_virt_release_full_gpu(adev, false);
1852
1853         return 0;
1854 }
1855
1856 static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
1857 {
1858         int i, r;
1859
1860         static enum amd_ip_block_type ip_order[] = {
1861                 AMD_IP_BLOCK_TYPE_GMC,
1862                 AMD_IP_BLOCK_TYPE_COMMON,
1863                 AMD_IP_BLOCK_TYPE_IH,
1864         };
1865
1866         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1867                 int j;
1868                 struct amdgpu_ip_block *block;
1869
1870                 for (j = 0; j < adev->num_ip_blocks; j++) {
1871                         block = &adev->ip_blocks[j];
1872
1873                         if (block->version->type != ip_order[i] ||
1874                                 !block->status.valid)
1875                                 continue;
1876
1877                         r = block->version->funcs->hw_init(adev);
1878                         DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
1879                 }
1880         }
1881
1882         return 0;
1883 }
1884
1885 static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
1886 {
1887         int i, r;
1888
1889         static enum amd_ip_block_type ip_order[] = {
1890                 AMD_IP_BLOCK_TYPE_SMC,
1891                 AMD_IP_BLOCK_TYPE_DCE,
1892                 AMD_IP_BLOCK_TYPE_GFX,
1893                 AMD_IP_BLOCK_TYPE_SDMA,
1894                 AMD_IP_BLOCK_TYPE_UVD,
1895                 AMD_IP_BLOCK_TYPE_VCE
1896         };
1897
1898         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1899                 int j;
1900                 struct amdgpu_ip_block *block;
1901
1902                 for (j = 0; j < adev->num_ip_blocks; j++) {
1903                         block = &adev->ip_blocks[j];
1904
1905                         if (block->version->type != ip_order[i] ||
1906                                 !block->status.valid)
1907                                 continue;
1908
1909                         r = block->version->funcs->hw_init(adev);
1910                         DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
1911                 }
1912         }
1913
1914         return 0;
1915 }
1916
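/*
 * Editorial note: the two SRIOV re-init helpers above mirror the
 * resume_phase1/phase2 split below: GMC, COMMON and IH are brought back
 * first so memory access and interrupts work, and only then are SMC,
 * DCE, GFX, SDMA, UVD and VCE re-initialized on top of them.
 */
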
1917 static int amdgpu_resume_phase1(struct amdgpu_device *adev)
1918 {
1919         int i, r;
1920
1921         for (i = 0; i < adev->num_ip_blocks; i++) {
1922                 if (!adev->ip_blocks[i].status.valid)
1923                         continue;
1924                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1925                                 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1926                                 adev->ip_blocks[i].version->type ==
1927                                 AMD_IP_BLOCK_TYPE_IH) {
1928                         r = adev->ip_blocks[i].version->funcs->resume(adev);
1929                         if (r) {
1930                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
1931                                           adev->ip_blocks[i].version->funcs->name, r);
1932                                 return r;
1933                         }
1934                 }
1935         }
1936
1937         return 0;
1938 }
1939
1940 static int amdgpu_resume_phase2(struct amdgpu_device *adev)
1941 {
1942         int i, r;
1943
1944         for (i = 0; i < adev->num_ip_blocks; i++) {
1945                 if (!adev->ip_blocks[i].status.valid)
1946                         continue;
1947                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1948                                 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1949                                 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH )
1950                         continue;
1951                 r = adev->ip_blocks[i].version->funcs->resume(adev);
1952                 if (r) {
1953                         DRM_ERROR("resume of IP block <%s> failed %d\n",
1954                                   adev->ip_blocks[i].version->funcs->name, r);
1955                         return r;
1956                 }
1957         }
1958
1959         return 0;
1960 }
1961
1962 static int amdgpu_resume(struct amdgpu_device *adev)
1963 {
1964         int r;
1965
1966         r = amdgpu_resume_phase1(adev);
1967         if (r)
1968                 return r;
1969         r = amdgpu_resume_phase2(adev);
1970
1971         return r;
1972 }
1973
1974 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
1975 {
1976         if (adev->is_atom_fw) {
1977                 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
1978                         adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1979         } else {
1980                 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1981                         adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1982         }
1983 }
1984
1985 /**
1986  * amdgpu_device_init - initialize the driver
1987  *
1988  * @adev: amdgpu_device pointer
1989  * @ddev: drm dev pointer
1990  * @pdev: pci dev pointer
1991  * @flags: driver flags
1992  *
1993  * Initializes the driver info and hw (all asics).
1994  * Returns 0 for success or an error on failure.
1995  * Called at driver startup.
1996  */
1997 int amdgpu_device_init(struct amdgpu_device *adev,
1998                        struct drm_device *ddev,
1999                        struct pci_dev *pdev,
2000                        uint32_t flags)
2001 {
2002         int r, i;
2003         bool runtime = false;
2004         u32 max_MBps;
2005
2006         adev->shutdown = false;
2007         adev->dev = &pdev->dev;
2008         adev->ddev = ddev;
2009         adev->pdev = pdev;
2010         adev->flags = flags;
2011         adev->asic_type = flags & AMD_ASIC_MASK;
2012         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
2013         adev->mc.gart_size = 512 * 1024 * 1024;
2014         adev->accel_working = false;
2015         adev->num_rings = 0;
2016         adev->mman.buffer_funcs = NULL;
2017         adev->mman.buffer_funcs_ring = NULL;
2018         adev->vm_manager.vm_pte_funcs = NULL;
2019         adev->vm_manager.vm_pte_num_rings = 0;
2020         adev->gart.gart_funcs = NULL;
2021         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2022
2023         adev->smc_rreg = &amdgpu_invalid_rreg;
2024         adev->smc_wreg = &amdgpu_invalid_wreg;
2025         adev->pcie_rreg = &amdgpu_invalid_rreg;
2026         adev->pcie_wreg = &amdgpu_invalid_wreg;
2027         adev->pciep_rreg = &amdgpu_invalid_rreg;
2028         adev->pciep_wreg = &amdgpu_invalid_wreg;
2029         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2030         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2031         adev->didt_rreg = &amdgpu_invalid_rreg;
2032         adev->didt_wreg = &amdgpu_invalid_wreg;
2033         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2034         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
2035         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2036         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2037
2039         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2040                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2041                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
2042
2043         /* mutex initialization is all done here so we
2044          * can call these functions without locking issues */
2045         atomic_set(&adev->irq.ih.lock, 0);
2046         mutex_init(&adev->firmware.mutex);
2047         mutex_init(&adev->pm.mutex);
2048         mutex_init(&adev->gfx.gpu_clock_mutex);
2049         mutex_init(&adev->srbm_mutex);
2050         mutex_init(&adev->grbm_idx_mutex);
2051         mutex_init(&adev->mn_lock);
2052         mutex_init(&adev->virt.vf_errors.lock);
2053         hash_init(adev->mn_hash);
2054
2055         amdgpu_check_arguments(adev);
2056
2057         spin_lock_init(&adev->mmio_idx_lock);
2058         spin_lock_init(&adev->smc_idx_lock);
2059         spin_lock_init(&adev->pcie_idx_lock);
2060         spin_lock_init(&adev->uvd_ctx_idx_lock);
2061         spin_lock_init(&adev->didt_idx_lock);
2062         spin_lock_init(&adev->gc_cac_idx_lock);
2063         spin_lock_init(&adev->se_cac_idx_lock);
2064         spin_lock_init(&adev->audio_endpt_idx_lock);
2065         spin_lock_init(&adev->mm_stats.lock);
2066
2067         INIT_LIST_HEAD(&adev->shadow_list);
2068         mutex_init(&adev->shadow_list_lock);
2069
2070         INIT_LIST_HEAD(&adev->gtt_list);
2071         spin_lock_init(&adev->gtt_list_lock);
2072
2073         INIT_LIST_HEAD(&adev->ring_lru_list);
2074         spin_lock_init(&adev->ring_lru_list_lock);
2075
2076         INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
2077
2078         /* Registers mapping */
2079         /* TODO: block userspace mapping of io register */
2080         if (adev->asic_type >= CHIP_BONAIRE) {
2081                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2082                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2083         } else {
2084                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2085                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2086         }
2087
2088         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2089         if (!adev->rmmio)
2090                 return -ENOMEM;
2092         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2093         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2094
2095         /* doorbell bar mapping */
2096         amdgpu_doorbell_init(adev);
2097
2098         /* io port mapping */
2099         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2100                 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2101                         adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2102                         adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2103                         break;
2104                 }
2105         }
2106         if (adev->rio_mem == NULL)
2107                 DRM_INFO("PCI I/O BAR not found\n");
2108
2109         /* early init functions */
2110         r = amdgpu_early_init(adev);
2111         if (r)
2112                 return r;
2113
2114         /* if we have more than one VGA card, disable the amdgpu VGA resources */
2115         /* this will fail for cards that aren't VGA class devices, just
2116          * ignore it */
2117         vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
2118
2119         if (amdgpu_runtime_pm == 1)
2120                 runtime = true;
2121         if (amdgpu_device_is_px(ddev))
2122                 runtime = true;
2123         if (!pci_is_thunderbolt_attached(adev->pdev))
2124                 vga_switcheroo_register_client(adev->pdev,
2125                                                &amdgpu_switcheroo_ops, runtime);
2126         if (runtime)
2127                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2128
2129         /* Read BIOS */
2130         if (!amdgpu_get_bios(adev)) {
2131                 r = -EINVAL;
2132                 goto failed;
2133         }
2134
2135         r = amdgpu_atombios_init(adev);
2136         if (r) {
2137                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2138                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2139                 goto failed;
2140         }
2141
2142         /* detect whether we have an SR-IOV vBIOS */
2143         amdgpu_device_detect_sriov_bios(adev);
2144
2145         /* Post card if necessary */
2146         if (amdgpu_vpost_needed(adev)) {
2147                 if (!adev->bios) {
2148                         dev_err(adev->dev, "no vBIOS found\n");
2149                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
2150                         r = -EINVAL;
2151                         goto failed;
2152                 }
2153                 DRM_INFO("GPU posting now...\n");
2154                 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2155                 if (r) {
2156                         dev_err(adev->dev, "gpu post error!\n");
2157                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
2158                         goto failed;
2159                 }
2160         } else {
2161                 DRM_INFO("GPU post is not needed\n");
2162         }
2163
2164         if (adev->is_atom_fw) {
2165                 /* Initialize clocks */
2166                 r = amdgpu_atomfirmware_get_clock_info(adev);
2167                 if (r) {
2168                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
2169                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2170                         goto failed;
2171                 }
2172         } else {
2173                 /* Initialize clocks */
2174                 r = amdgpu_atombios_get_clock_info(adev);
2175                 if (r) {
2176                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
2177                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2178                         goto failed;
2179                 }
2180                 /* init i2c buses */
2181                 amdgpu_atombios_i2c_init(adev);
2182         }
2183
2184         /* Fence driver */
2185         r = amdgpu_fence_driver_init(adev);
2186         if (r) {
2187                 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
2188                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
2189                 goto failed;
2190         }
2191
2192         /* init the mode config */
2193         drm_mode_config_init(adev->ddev);
2194
2195         r = amdgpu_init(adev);
2196         if (r) {
2197                 dev_err(adev->dev, "amdgpu_init failed\n");
2198                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
2199                 amdgpu_fini(adev);
2200                 goto failed;
2201         }
2202
2203         adev->accel_working = true;
2204
2205         amdgpu_vm_check_compute_bug(adev);
2206
2207         /* Initialize the buffer migration limit. */
2208         if (amdgpu_moverate >= 0)
2209                 max_MBps = amdgpu_moverate;
2210         else
2211                 max_MBps = 8; /* Allow 8 MB/s. */
2212         /* Get a log2 for easy divisions. */
2213         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
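        /* Editorial example: the default max_MBps = 8 gives ilog2(8) = 3,
         * so later "bytes / max_MBps" accounting reduces to a cheap right
         * shift by log2_max_MBps; the max(1u, ...) above guards against
         * the undefined ilog2(0). */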
2214
2215         r = amdgpu_ib_pool_init(adev);
2216         if (r) {
2217                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2218                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2219                 goto failed;
2220         }
2221
2222         r = amdgpu_ib_ring_tests(adev);
2223         if (r)
2224                 DRM_ERROR("ib ring test failed (%d).\n", r);
2225
2226         amdgpu_fbdev_init(adev);
2227
2228         r = amdgpu_pm_sysfs_init(adev);
2229         if (r)
2230                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2231
2232         r = amdgpu_gem_debugfs_init(adev);
2233         if (r)
2234                 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
2235
2236         r = amdgpu_debugfs_regs_init(adev);
2237         if (r)
2238                 DRM_ERROR("registering register debugfs failed (%d).\n", r);
2239
2240         r = amdgpu_debugfs_test_ib_ring_init(adev);
2241         if (r)
2242                 DRM_ERROR("registering register test ib ring debugfs failed (%d).\n", r);
2243
2244         r = amdgpu_debugfs_firmware_init(adev);
2245         if (r)
2246                 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
2247
2248         r = amdgpu_debugfs_vbios_dump_init(adev);
2249         if (r)
2250                 DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
2251
2252         if (amdgpu_testing & 1) {
2253                 if (adev->accel_working)
2254                         amdgpu_test_moves(adev);
2255                 else
2256                         DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2257         }
2258         if (amdgpu_benchmarking) {
2259                 if (adev->accel_working)
2260                         amdgpu_benchmark(adev, amdgpu_benchmarking);
2261                 else
2262                         DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2263         }
2264
2265         /* enable clockgating, etc. after ib tests, etc. since some blocks require
2266          * explicit gating rather than handling it automatically.
2267          */
2268         r = amdgpu_late_init(adev);
2269         if (r) {
2270                 dev_err(adev->dev, "amdgpu_late_init failed\n");
2271                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
2272                 goto failed;
2273         }
2274
2275         return 0;
2276
2277 failed:
2278         amdgpu_vf_error_trans_all(adev);
2279         if (runtime)
2280                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2281         return r;
2282 }
2283
2284 /**
2285  * amdgpu_device_fini - tear down the driver
2286  *
2287  * @adev: amdgpu_device pointer
2288  *
2289  * Tear down the driver info (all asics).
2290  * Called at driver shutdown.
2291  */
2292 void amdgpu_device_fini(struct amdgpu_device *adev)
2293 {
2294         int r;
2295
2296         DRM_INFO("amdgpu: finishing device.\n");
2297         adev->shutdown = true;
2298         if (adev->mode_info.mode_config_initialized)
2299                 drm_crtc_force_disable_all(adev->ddev);
2300         /* evict vram memory */
2301         amdgpu_bo_evict_vram(adev);
2302         amdgpu_ib_pool_fini(adev);
2303         amdgpu_fence_driver_fini(adev);
2304         amdgpu_fbdev_fini(adev);
2305         r = amdgpu_fini(adev);
2306         if (adev->firmware.gpu_info_fw) {
2307                 release_firmware(adev->firmware.gpu_info_fw);
2308                 adev->firmware.gpu_info_fw = NULL;
2309         }
2310         adev->accel_working = false;
2311         cancel_delayed_work_sync(&adev->late_init_work);
2312         /* free i2c buses */
2313         amdgpu_i2c_fini(adev);
2314         amdgpu_atombios_fini(adev);
2315         kfree(adev->bios);
2316         adev->bios = NULL;
2317         if (!pci_is_thunderbolt_attached(adev->pdev))
2318                 vga_switcheroo_unregister_client(adev->pdev);
2319         if (adev->flags & AMD_IS_PX)
2320                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2321         vga_client_register(adev->pdev, NULL, NULL, NULL);
2322         if (adev->rio_mem)
2323                 pci_iounmap(adev->pdev, adev->rio_mem);
2324         adev->rio_mem = NULL;
2325         iounmap(adev->rmmio);
2326         adev->rmmio = NULL;
2327         amdgpu_doorbell_fini(adev);
2328         amdgpu_pm_sysfs_fini(adev);
2329         amdgpu_debugfs_regs_cleanup(adev);
2330 }
2331
2332
2333 /*
2334  * Suspend & resume.
2335  */
2336 /**
2337  * amdgpu_device_suspend - initiate device suspend
2338  *
2339  * @dev: drm dev pointer
2340  * @suspend: true to put the hw into D3hot (false resets the ASIC instead)
2341  *
2342  * Puts the hw in the suspend state (all asics).
2343  * Returns 0 for success or an error on failure.
2344  * Called at driver suspend.
2345  */
2346 int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
2347 {
2348         struct amdgpu_device *adev;
2349         struct drm_crtc *crtc;
2350         struct drm_connector *connector;
2351         int r;
2352
2353         if (dev == NULL || dev->dev_private == NULL) {
2354                 return -ENODEV;
2355         }
2356
2357         adev = dev->dev_private;
2358
2359         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2360                 return 0;
2361
2362         drm_kms_helper_poll_disable(dev);
2363
2364         /* turn off display hw */
2365         drm_modeset_lock_all(dev);
2366         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2367                 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2368         }
2369         drm_modeset_unlock_all(dev);
2370
2371         amdgpu_amdkfd_suspend(adev);
2372
2373         /* unpin the front buffers and cursors */
2374         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2375                 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2376                 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2377                 struct amdgpu_bo *robj;
2378
2379                 if (amdgpu_crtc->cursor_bo) {
2380                         struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2381                         r = amdgpu_bo_reserve(aobj, true);
2382                         if (r == 0) {
2383                                 amdgpu_bo_unpin(aobj);
2384                                 amdgpu_bo_unreserve(aobj);
2385                         }
2386                 }
2387
2388                 if (rfb == NULL || rfb->obj == NULL) {
2389                         continue;
2390                 }
2391                 robj = gem_to_amdgpu_bo(rfb->obj);
2392                 /* don't unpin kernel fb objects */
2393                 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
2394                         r = amdgpu_bo_reserve(robj, true);
2395                         if (r == 0) {
2396                                 amdgpu_bo_unpin(robj);
2397                                 amdgpu_bo_unreserve(robj);
2398                         }
2399                 }
2400         }
2401         /* evict vram memory */
2402         amdgpu_bo_evict_vram(adev);
2403
2404         amdgpu_fence_driver_suspend(adev);
2405
2406         r = amdgpu_suspend(adev);
2407
2408         /* evict remaining vram memory
2409          * This second call to evict vram is to evict the gart page table
2410          * using the CPU.
2411          */
2412         amdgpu_bo_evict_vram(adev);
2413
2414         amdgpu_atombios_scratch_regs_save(adev);
2415         pci_save_state(dev->pdev);
2416         if (suspend) {
2417                 /* Shut down the device */
2418                 pci_disable_device(dev->pdev);
2419                 pci_set_power_state(dev->pdev, PCI_D3hot);
2420         } else {
2421                 r = amdgpu_asic_reset(adev);
2422                 if (r)
2423                         DRM_ERROR("amdgpu asic reset failed\n");
2424         }
2425
2426         if (fbcon) {
2427                 console_lock();
2428                 amdgpu_fbdev_set_suspend(adev, 1);
2429                 console_unlock();
2430         }
2431         return 0;
2432 }
2433
2434 /**
2435  * amdgpu_device_resume - initiate device resume
2436  *
2437  * @dev: drm dev pointer
2438  *
2439  * Bring the hw back to operating state (all asics).
2440  * Returns 0 for success or an error on failure.
2441  * Called at driver resume.
2442  */
2443 int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2444 {
2445         struct drm_connector *connector;
2446         struct amdgpu_device *adev = dev->dev_private;
2447         struct drm_crtc *crtc;
2448         int r = 0;
2449
2450         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2451                 return 0;
2452
2453         if (fbcon)
2454                 console_lock();
2455
2456         if (resume) {
2457                 pci_set_power_state(dev->pdev, PCI_D0);
2458                 pci_restore_state(dev->pdev);
2459                 r = pci_enable_device(dev->pdev);
2460                 if (r)
2461                         goto unlock;
2462         }
2463         amdgpu_atombios_scratch_regs_restore(adev);
2464
2465         /* post card */
2466         if (amdgpu_need_post(adev)) {
2467                 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2468                 if (r)
2469                         DRM_ERROR("amdgpu asic init failed\n");
2470         }
2471
2472         r = amdgpu_resume(adev);
2473         if (r) {
2474                 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
2475                 goto unlock;
2476         }
2477         amdgpu_fence_driver_resume(adev);
2478
2479         if (resume) {
2480                 r = amdgpu_ib_ring_tests(adev);
2481                 if (r)
2482                         DRM_ERROR("ib ring test failed (%d).\n", r);
2483         }
2484
2485         r = amdgpu_late_init(adev);
2486         if (r)
2487                 goto unlock;
2488
2489         /* pin cursors */
2490         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2491                 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2492
2493                 if (amdgpu_crtc->cursor_bo) {
2494                         struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2495                         r = amdgpu_bo_reserve(aobj, true);
2496                         if (r == 0) {
2497                                 r = amdgpu_bo_pin(aobj,
2498                                                   AMDGPU_GEM_DOMAIN_VRAM,
2499                                                   &amdgpu_crtc->cursor_addr);
2500                                 if (r != 0)
2501                                         DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2502                                 amdgpu_bo_unreserve(aobj);
2503                         }
2504                 }
2505         }
2506         r = amdgpu_amdkfd_resume(adev);
2507         if (r)
2508                 return r;
2509
2510         /* blat the mode back in */
2511         if (fbcon) {
2512                 drm_helper_resume_force_mode(dev);
2513                 /* turn on display hw */
2514                 drm_modeset_lock_all(dev);
2515                 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2516                         drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2517                 }
2518                 drm_modeset_unlock_all(dev);
2519         }
2520
2521         drm_kms_helper_poll_enable(dev);
2522
2523         /*
2524          * Most of the connector probing functions try to acquire runtime pm
2525          * refs to ensure that the GPU is powered on when connector polling is
2526          * performed. Since we're calling this from a runtime PM callback,
2527          * trying to acquire rpm refs will cause us to deadlock.
2528          *
2529          * Since we're guaranteed to be holding the rpm lock, it's safe to
2530          * temporarily disable the rpm helpers so this doesn't deadlock us.
2531          */
2532 #ifdef CONFIG_PM
2533         dev->dev->power.disable_depth++;
2534 #endif
2535         drm_helper_hpd_irq_event(dev);
2536 #ifdef CONFIG_PM
2537         dev->dev->power.disable_depth--;
2538 #endif
2539
2540         if (fbcon)
2541                 amdgpu_fbdev_set_suspend(adev, 0);
2542
2543 unlock:
2544         if (fbcon)
2545                 console_unlock();
2546
2547         return r;
2548 }
2549
2550 static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2551 {
2552         int i;
2553         bool asic_hang = false;
2554
2555         for (i = 0; i < adev->num_ip_blocks; i++) {
2556                 if (!adev->ip_blocks[i].status.valid)
2557                         continue;
2558                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2559                         adev->ip_blocks[i].status.hang =
2560                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2561                 if (adev->ip_blocks[i].status.hang) {
2562                         DRM_INFO("IP block <%s> is hung!\n", adev->ip_blocks[i].version->funcs->name);
2563                         asic_hang = true;
2564                 }
2565         }
2566         return asic_hang;
2567 }
2568
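/*
 * Editorial overview of the reset ladder used by amdgpu_gpu_reset():
 * amdgpu_check_soft_reset() above marks hung IP blocks; unless one of
 * the marked blocks demands a full reset (amdgpu_need_full_reset()),
 * the pre_soft_reset -> soft_reset -> post_soft_reset hooks below are
 * tried per hung block first, and only if that fails does the driver
 * fall back to a full ASIC reset.
 */
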
2569 static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
2570 {
2571         int i, r = 0;
2572
2573         for (i = 0; i < adev->num_ip_blocks; i++) {
2574                 if (!adev->ip_blocks[i].status.valid)
2575                         continue;
2576                 if (adev->ip_blocks[i].status.hang &&
2577                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2578                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
2579                         if (r)
2580                                 return r;
2581                 }
2582         }
2583
2584         return 0;
2585 }
2586
2587 static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2588 {
2589         int i;
2590
2591         for (i = 0; i < adev->num_ip_blocks; i++) {
2592                 if (!adev->ip_blocks[i].status.valid)
2593                         continue;
2594                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2595                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2596                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
2597                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
2598                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2599                         if (adev->ip_blocks[i].status.hang) {
2600                                 DRM_INFO("Some blocks need a full reset!\n");
2601                                 return true;
2602                         }
2603                 }
2604         }
2605         return false;
2606 }
2607
2608 static int amdgpu_soft_reset(struct amdgpu_device *adev)
2609 {
2610         int i, r = 0;
2611
2612         for (i = 0; i < adev->num_ip_blocks; i++) {
2613                 if (!adev->ip_blocks[i].status.valid)
2614                         continue;
2615                 if (adev->ip_blocks[i].status.hang &&
2616                     adev->ip_blocks[i].version->funcs->soft_reset) {
2617                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
2618                         if (r)
2619                                 return r;
2620                 }
2621         }
2622
2623         return 0;
2624 }
2625
2626 static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2627 {
2628         int i, r = 0;
2629
2630         for (i = 0; i < adev->num_ip_blocks; i++) {
2631                 if (!adev->ip_blocks[i].status.valid)
2632                         continue;
2633                 if (adev->ip_blocks[i].status.hang &&
2634                     adev->ip_blocks[i].version->funcs->post_soft_reset)
2635                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
2636                 if (r)
2637                         return r;
2638         }
2639
2640         return 0;
2641 }
2642
2643 bool amdgpu_need_backup(struct amdgpu_device *adev)
2644 {
2645         if (adev->flags & AMD_IS_APU)
2646                 return false;
2647
2648         return amdgpu_lockup_timeout > 0;
2649 }
2650
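/*
 * Editorial note on amdgpu_need_backup() above: APUs carve their "VRAM"
 * out of system memory, which survives an ASIC reset, so shadow backups
 * buy nothing there; on dGPUs the backup path is only worth taking when
 * lockup detection (amdgpu_lockup_timeout > 0) can actually trigger a
 * reset.
 */
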
2651 static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2652                                            struct amdgpu_ring *ring,
2653                                            struct amdgpu_bo *bo,
2654                                            struct dma_fence **fence)
2655 {
2656         uint32_t domain;
2657         int r;
2658
2659         if (!bo->shadow)
2660                 return 0;
2661
2662         r = amdgpu_bo_reserve(bo, true);
2663         if (r)
2664                 return r;
2665         domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2666         /* if bo has been evicted, then no need to recover */
2667         if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
2668                 r = amdgpu_bo_validate(bo->shadow);
2669                 if (r) {
2670                         DRM_ERROR("bo validate failed!\n");
2671                         goto err;
2672                 }
2673
2674                 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
2675                                                  NULL, fence, true);
2676                 if (r) {
2677                         DRM_ERROR("recover page table failed!\n");
2678                         goto err;
2679                 }
2680         }
2681 err:
2682         amdgpu_bo_unreserve(bo);
2683         return r;
2684 }
2685
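/*
 * Editorial note: a "shadow" BO is a GTT (system memory) copy kept for
 * selected VRAM BOs such as VM page tables.  After a reset that lost
 * VRAM, amdgpu_recover_vram_from_shadow() above validates the shadow
 * and queues a copy back into VRAM on @ring, handing the copy fence
 * back through @fence; the reset paths below walk adev->shadow_list,
 * chaining those fences and waiting on the last one.
 */
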
2686 /**
2687  * amdgpu_sriov_gpu_reset - reset the asic
2688  *
2689  * @adev: amdgpu device pointer
2690  * @job: the job that triggered the hang
2691  *
2692  * Attempt to reset the GPU if it has hung (all asics),
2693  * for the SRIOV case.
2694  * Returns 0 for success or an error on failure.
2695  */
2696 int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
2697 {
2698         int i, j, r = 0;
2699         int resched;
2700         struct amdgpu_bo *bo, *tmp;
2701         struct amdgpu_ring *ring;
2702         struct dma_fence *fence = NULL, *next = NULL;
2703
2704         mutex_lock(&adev->virt.lock_reset);
2705         atomic_inc(&adev->gpu_reset_counter);
2706         adev->in_sriov_reset = true;
2707
2708         /* block TTM */
2709         resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2710
2711         /* we start from the ring that triggered the GPU hang */
2712         j = job ? job->ring->idx : 0;
2713
2714         /* block scheduler */
2715         for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2716                 ring = adev->rings[i % AMDGPU_MAX_RINGS];
2717                 if (!ring || !ring->sched.thread)
2718                         continue;
2719
2720                 kthread_park(ring->sched.thread);
2721
2722                 if (job && j != i)
2723                         continue;
2724
2725                 /* give one last chance to check if the job was removed from the
2726                  * mirror list, since we already spent time on kthread_park */
2727                 if (job && list_empty(&job->base.node)) {
2728                         kthread_unpark(ring->sched.thread);
2729                         goto give_up_reset;
2730                 }
2731
2732                 if (job && amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
2733                         amd_sched_job_kickout(&job->base);
2734
2735                 /* only do job_reset on the hung ring if @job is not NULL */
2736                 amd_sched_hw_job_reset(&ring->sched);
2737
2738                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2739                 amdgpu_fence_driver_force_completion_ring(ring);
2740         }
2741
2742         /* request to take full control of GPU before re-initialization  */
2743         if (job)
2744                 amdgpu_virt_reset_gpu(adev);
2745         else
2746                 amdgpu_virt_request_full_gpu(adev, true);
2747
2749         /* Resume IP prior to SMC */
2750         amdgpu_sriov_reinit_early(adev);
2751
2752         /* we need to recover the gart prior to running SMC/CP/SDMA resume */
2753         amdgpu_ttm_recover_gart(adev);
2754
2755         /* now we are okay to resume SMC/CP/SDMA */
2756         amdgpu_sriov_reinit_late(adev);
2757
2758         amdgpu_irq_gpu_reset_resume_helper(adev);
2759
2760         if (amdgpu_ib_ring_tests(adev))
2761                 dev_err(adev->dev, "[GPU_RESET] ib ring test failed\n");
2762
2763         /* release full control of GPU after ib test */
2764         amdgpu_virt_release_full_gpu(adev, true);
2765
2766         DRM_INFO("recover vram bo from shadow\n");
2767
2768         ring = adev->mman.buffer_funcs_ring;
2769         mutex_lock(&adev->shadow_list_lock);
2770         list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2771                 next = NULL;
2772                 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2773                 if (fence) {
2774                         r = dma_fence_wait(fence, false);
2775                         if (r) {
2776                                 WARN(r, "recovery from shadow didn't complete\n");
2777                                 break;
2778                         }
2779                 }
2780
2781                 dma_fence_put(fence);
2782                 fence = next;
2783         }
2784         mutex_unlock(&adev->shadow_list_lock);
2785
2786         if (fence) {
2787                 r = dma_fence_wait(fence, false);
2788                 if (r)
2789                         WARN(r, "recovery from shadow didn't complete\n");
2790         }
2791         dma_fence_put(fence);
2792
2793         for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2794                 ring = adev->rings[i % AMDGPU_MAX_RINGS];
2795                 if (!ring || !ring->sched.thread)
2796                         continue;
2797
2798                 if (job && j != i) {
2799                         kthread_unpark(ring->sched.thread);
2800                         continue;
2801                 }
2802
2803                 amd_sched_job_recovery(&ring->sched);
2804                 kthread_unpark(ring->sched.thread);
2805         }
2806
2807         drm_helper_resume_force_mode(adev->ddev);
2808 give_up_reset:
2809         ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2810         if (r) {
2811                 /* bad news, how do we tell userspace? */
2812                 dev_info(adev->dev, "GPU reset failed\n");
2813         } else {
2814                 dev_info(adev->dev, "GPU reset succeeded!\n");
2815         }
2816
2817         adev->in_sriov_reset = false;
2818         mutex_unlock(&adev->virt.lock_reset);
2819         return r;
2820 }
2821
2822 /**
2823  * amdgpu_gpu_reset - reset the asic
2824  *
2825  * @adev: amdgpu device pointer
2826  *
2827  * Attempt to reset the GPU if it has hung (all asics).
2828  * Returns 0 for success or an error on failure.
2829  */
2830 int amdgpu_gpu_reset(struct amdgpu_device *adev)
2831 {
2832         int i, r;
2833         int resched;
2834         bool need_full_reset, vram_lost = false;
2835
2836         if (!amdgpu_check_soft_reset(adev)) {
2837                 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2838                 return 0;
2839         }
2840
2841         atomic_inc(&adev->gpu_reset_counter);
2842
2843         /* block TTM */
2844         resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2845
2846         /* block scheduler */
2847         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2848                 struct amdgpu_ring *ring = adev->rings[i];
2849
2850                 if (!ring || !ring->sched.thread)
2851                         continue;
2852                 kthread_park(ring->sched.thread);
2853                 amd_sched_hw_job_reset(&ring->sched);
2854         }
2855         /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2856         amdgpu_fence_driver_force_completion(adev);
2857
2858         need_full_reset = amdgpu_need_full_reset(adev);
2859
2860         if (!need_full_reset) {
2861                 amdgpu_pre_soft_reset(adev);
2862                 r = amdgpu_soft_reset(adev);
2863                 amdgpu_post_soft_reset(adev);
2864                 if (r || amdgpu_check_soft_reset(adev)) {
2865                         DRM_INFO("soft reset failed, falling back to full reset!\n");
2866                         need_full_reset = true;
2867                 }
2868         }
2869
2870         if (need_full_reset) {
2871                 r = amdgpu_suspend(adev);
2872
2873 retry:
2874                 amdgpu_atombios_scratch_regs_save(adev);
2875                 r = amdgpu_asic_reset(adev);
2876                 amdgpu_atombios_scratch_regs_restore(adev);
2877                 /* post card */
2878                 amdgpu_atom_asic_init(adev->mode_info.atom_context);
2879
2880                 if (!r) {
2881                         dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2882                         r = amdgpu_resume_phase1(adev);
2883                         if (r)
2884                                 goto out;
2885                         vram_lost = amdgpu_check_vram_lost(adev);
2886                         if (vram_lost) {
2887                                 DRM_ERROR("VRAM is lost!\n");
2888                                 atomic_inc(&adev->vram_lost_counter);
2889                         }
2890                         r = amdgpu_ttm_recover_gart(adev);
2891                         if (r)
2892                                 goto out;
2893                         r = amdgpu_resume_phase2(adev);
2894                         if (r)
2895                                 goto out;
2896                         if (vram_lost)
2897                                 amdgpu_fill_reset_magic(adev);
2898                 }
2899         }
2900 out:
2901         if (!r) {
2902                 amdgpu_irq_gpu_reset_resume_helper(adev);
2903                 r = amdgpu_ib_ring_tests(adev);
2904                 if (r) {
2905                         dev_err(adev->dev, "ib ring test failed (%d).\n", r);
2906                         r = amdgpu_suspend(adev);
2907                         need_full_reset = true;
2908                         goto retry;
2909                 }
2910                 /*
2911                  * recover vm page tables, since we cannot assume VRAM is
2912                  * consistent after a full gpu reset.
2913                  */
2914                 if (need_full_reset && amdgpu_need_backup(adev)) {
2915                         struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2916                         struct amdgpu_bo *bo, *tmp;
2917                         struct dma_fence *fence = NULL, *next = NULL;
2918
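                        /*
                         * The loop below pipelines the recovery: the copy for
                         * the next shadow BO is kicked off before waiting on
                         * the fence of the previous one, so transfers overlap
                         * on the buffer-funcs ring.
                         */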
2919                         DRM_INFO("recover vram bo from shadow\n");
2920                         mutex_lock(&adev->shadow_list_lock);
2921                         list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2922                                 next = NULL;
2923                                 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2924                                 if (fence) {
2925                                         r = dma_fence_wait(fence, false);
2926                                         if (r) {
2927                                                 WARN(r, "recovery from shadow didn't complete\n");
2928                                                 break;
2929                                         }
2930                                 }
2931
2932                                 dma_fence_put(fence);
2933                                 fence = next;
2934                         }
2935                         mutex_unlock(&adev->shadow_list_lock);
2936                         if (fence) {
2937                                 r = dma_fence_wait(fence, false);
2938                                 if (r)
2939                                         WARN(r, "recovery from shadow didn't complete\n");
2940                         }
2941                         dma_fence_put(fence);
2942                 }
2943                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2944                         struct amdgpu_ring *ring = adev->rings[i];
2945
2946                         if (!ring || !ring->sched.thread)
2947                                 continue;
2948
2949                         amd_sched_job_recovery(&ring->sched);
2950                         kthread_unpark(ring->sched.thread);
2951                 }
2952         } else {
2953                 dev_err(adev->dev, "asic resume failed (%d).\n", r);
2954                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
2955                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2956                         if (adev->rings[i] && adev->rings[i]->sched.thread) {
2957                                 kthread_unpark(adev->rings[i]->sched.thread);
2958                         }
2959                 }
2960         }
2961
2962         drm_helper_resume_force_mode(adev->ddev);
2963
2964         ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2965         if (r) {
2966                 /* bad news - how do we tell userspace? */
2967                 dev_info(adev->dev, "GPU reset failed\n");
2968                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
2969         }
2970         else {
2971                 dev_info(adev->dev, "GPU reset succeeded!\n");
2972         }
2973
2974         amdgpu_vf_error_trans_all(adev);
2975         return r;
2976 }
2977
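/*
 * Derive the supported PCIe link-speed and lane-width masks for this
 * device.  Priority order: the amdgpu_pcie_gen_cap / amdgpu_pcie_lane_cap
 * module parameters win when set; devices on the root bus (which covers
 * APUs) fall back to the compile-time defaults; everything else builds
 * the CAIL masks from the DRM PCIe helpers.  The width mask is
 * cumulative, e.g. an x8 link yields X8|X4|X2|X1: the native width plus
 * every narrower one.
 */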
2978 void amdgpu_get_pcie_info(struct amdgpu_device *adev)
2979 {
2980         u32 mask;
2981         int ret;
2982
2983         if (amdgpu_pcie_gen_cap)
2984                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
2985
2986         if (amdgpu_pcie_lane_cap)
2987                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
2988
2989         /* covers APUs as well */
2990         if (pci_is_root_bus(adev->pdev->bus)) {
2991                 if (adev->pm.pcie_gen_mask == 0)
2992                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2993                 if (adev->pm.pcie_mlw_mask == 0)
2994                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
2995                 return;
2996         }
2997
2998         if (adev->pm.pcie_gen_mask == 0) {
2999                 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
3000                 if (!ret) {
3001                         adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3002                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3003                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3004
3005                         if (mask & DRM_PCIE_SPEED_25)
3006                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3007                         if (mask & DRM_PCIE_SPEED_50)
3008                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
3009                         if (mask & DRM_PCIE_SPEED_80)
3010                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
3011                 } else {
3012                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3013                 }
3014         }
3015         if (adev->pm.pcie_mlw_mask == 0) {
3016                 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
3017                 if (!ret) {
3018                         switch (mask) {
3019                         case 32:
3020                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3021                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3022                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3023                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3024                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3025                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3026                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3027                                 break;
3028                         case 16:
3029                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3030                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3031                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3032                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3033                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3034                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3035                                 break;
3036                         case 12:
3037                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3038                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3039                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3040                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3041                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3042                                 break;
3043                         case 8:
3044                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3045                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3046                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3047                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3048                                 break;
3049                         case 4:
3050                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3051                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3052                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3053                                 break;
3054                         case 2:
3055                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3056                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3057                                 break;
3058                         case 1:
3059                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3060                                 break;
3061                         default:
3062                                 break;
3063                         }
3064                 } else {
3065                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
3066                 }
3067         }
3068 }
3069
3070 /*
3071  * Debugfs
3072  */
3073 int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
3074                              const struct drm_info_list *files,
3075                              unsigned nfiles)
3076 {
3077         unsigned i;
3078
3079         for (i = 0; i < adev->debugfs_count; i++) {
3080                 if (adev->debugfs[i].files == files) {
3081                         /* Already registered */
3082                         return 0;
3083                 }
3084         }
3085
3086         i = adev->debugfs_count + 1;
3087         if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
3088                 DRM_ERROR("Reached maximum number of debugfs components.\n");
3089                 DRM_ERROR("Report this so we can increase "
3090                           "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
3091                 return -EINVAL;
3092         }
3093         adev->debugfs[adev->debugfs_count].files = files;
3094         adev->debugfs[adev->debugfs_count].num_files = nfiles;
3095         adev->debugfs_count = i;
3096 #if defined(CONFIG_DEBUG_FS)
3097         drm_debugfs_create_files(files, nfiles,
3098                                  adev->ddev->primary->debugfs_root,
3099                                  adev->ddev->primary);
3100 #endif
3101         return 0;
3102 }
3103
3104 #if defined(CONFIG_DEBUG_FS)
3105
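/*
 * The amdgpu_regs file exposes dword access to the MMIO register BAR,
 * and the write path below decodes the same layout.  The file position
 * encodes more than the register offset:
 *
 *   bits [21:0]  register byte offset (dword aligned, accessed via RREG32)
 *   bit  23      take the PM power-gating lock around the access
 *   bit  62      banked addressing: SE at [33:24], SH at [43:34],
 *                instance at [53:44]; a field value of 0x3FF selects
 *                broadcast to all banks
 *
 * Userspace sketch (the register offset 0x2004 is purely illustrative):
 *
 *   uint32_t val;
 *   pread(fd, &val, 4, 0x2004);
 *   loff_t pos = (1ULL << 62) | (1ULL << 23) |
 *                (1ULL << 24) | (0x3FFULL << 44) | 0x2004;
 *   pread(fd, &val, 4, pos);
 */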
3106 static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
3107                                         size_t size, loff_t *pos)
3108 {
3109         struct amdgpu_device *adev = file_inode(f)->i_private;
3110         ssize_t result = 0;
3111         int r;
3112         bool pm_pg_lock, use_bank;
3113         unsigned instance_bank, sh_bank, se_bank;
3114
3115         if (size & 0x3 || *pos & 0x3)
3116                 return -EINVAL;
3117
3118         /* are we reading registers for which a PG lock is necessary? */
3119         pm_pg_lock = (*pos >> 23) & 1;
3120
3121         if (*pos & (1ULL << 62)) {
3122                 se_bank = (*pos >> 24) & 0x3FF;
3123                 sh_bank = (*pos >> 34) & 0x3FF;
3124                 instance_bank = (*pos >> 44) & 0x3FF;
3125
3126                 if (se_bank == 0x3FF)
3127                         se_bank = 0xFFFFFFFF;
3128                 if (sh_bank == 0x3FF)
3129                         sh_bank = 0xFFFFFFFF;
3130                 if (instance_bank == 0x3FF)
3131                         instance_bank = 0xFFFFFFFF;
3132                 use_bank = true;
3133         } else {
3134                 use_bank = false;
3135         }
3136
3137         *pos &= (1UL << 22) - 1;
3138
3139         if (use_bank) {
3140                 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3141                     (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
3142                         return -EINVAL;
3143                 mutex_lock(&adev->grbm_idx_mutex);
3144                 amdgpu_gfx_select_se_sh(adev, se_bank,
3145                                         sh_bank, instance_bank);
3146         }
3147
3148         if (pm_pg_lock)
3149                 mutex_lock(&adev->pm.mutex);
3150
3151         while (size) {
3152                 uint32_t value;
3153
3154                 if (*pos > adev->rmmio_size)
3155                         goto end;
3156
3157                 value = RREG32(*pos >> 2);
3158                 r = put_user(value, (uint32_t *)buf);
3159                 if (r) {
3160                         result = r;
3161                         goto end;
3162                 }
3163
3164                 result += 4;
3165                 buf += 4;
3166                 *pos += 4;
3167                 size -= 4;
3168         }
3169
3170 end:
3171         if (use_bank) {
3172                 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3173                 mutex_unlock(&adev->grbm_idx_mutex);
3174         }
3175
3176         if (pm_pg_lock)
3177                 mutex_unlock(&adev->pm.mutex);
3178
3179         return result;
3180 }
3181
3182 static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
3183                                          size_t size, loff_t *pos)
3184 {
3185         struct amdgpu_device *adev = file_inode(f)->i_private;
3186         ssize_t result = 0;
3187         int r;
3188         bool pm_pg_lock, use_bank;
3189         unsigned instance_bank, sh_bank, se_bank;
3190
3191         if (size & 0x3 || *pos & 0x3)
3192                 return -EINVAL;
3193
3194         /* are we writing registers for which a PG lock is necessary? */
3195         pm_pg_lock = (*pos >> 23) & 1;
3196
3197         if (*pos & (1ULL << 62)) {
3198                 se_bank = (*pos >> 24) & 0x3FF;
3199                 sh_bank = (*pos >> 34) & 0x3FF;
3200                 instance_bank = (*pos >> 44) & 0x3FF;
3201
3202                 if (se_bank == 0x3FF)
3203                         se_bank = 0xFFFFFFFF;
3204                 if (sh_bank == 0x3FF)
3205                         sh_bank = 0xFFFFFFFF;
3206                 if (instance_bank == 0x3FF)
3207                         instance_bank = 0xFFFFFFFF;
3208                 use_bank = true;
3209         } else {
3210                 use_bank = false;
3211         }
3212
3213         *pos &= (1UL << 22) - 1;
3214
3215         if (use_bank) {
3216                 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3217                     (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
3218                         return -EINVAL;
3219                 mutex_lock(&adev->grbm_idx_mutex);
3220                 amdgpu_gfx_select_se_sh(adev, se_bank,
3221                                         sh_bank, instance_bank);
3222         }
3223
3224         if (pm_pg_lock)
3225                 mutex_lock(&adev->pm.mutex);
3226
3227         while (size) {
3228                 uint32_t value;
3229
3230                 if (*pos > adev->rmmio_size)
3231                         return result;
3232
3233                 r = get_user(value, (uint32_t *)buf);
3234                 if (r)
3235                         return r;
3236
3237                 WREG32(*pos >> 2, value);
3238
3239                 result += 4;
3240                 buf += 4;
3241                 *pos += 4;
3242                 size -= 4;
3243         }
3244
3245         if (use_bank) {
3246                 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3247                 mutex_unlock(&adev->grbm_idx_mutex);
3248         }
3249
3250         if (pm_pg_lock)
3251                 mutex_unlock(&adev->pm.mutex);
3252
3253         return result;
3254 }
3255
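/*
 * The PCIE, DIDT and SMC register files below reuse the same
 * dword-at-a-time copy loop but go through the respective indirect access
 * macros, with no banking and no bounds checking.  Note the asymmetry:
 * RREG32_PCIE()/RREG32_DIDT() take a dword index (*pos >> 2) while
 * RREG32_SMC() takes the byte address directly.
 */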
3256 static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
3257                                         size_t size, loff_t *pos)
3258 {
3259         struct amdgpu_device *adev = file_inode(f)->i_private;
3260         ssize_t result = 0;
3261         int r;
3262
3263         if (size & 0x3 || *pos & 0x3)
3264                 return -EINVAL;
3265
3266         while (size) {
3267                 uint32_t value;
3268
3269                 value = RREG32_PCIE(*pos >> 2);
3270                 r = put_user(value, (uint32_t *)buf);
3271                 if (r)
3272                         return r;
3273
3274                 result += 4;
3275                 buf += 4;
3276                 *pos += 4;
3277                 size -= 4;
3278         }
3279
3280         return result;
3281 }
3282
3283 static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3284                                          size_t size, loff_t *pos)
3285 {
3286         struct amdgpu_device *adev = file_inode(f)->i_private;
3287         ssize_t result = 0;
3288         int r;
3289
3290         if (size & 0x3 || *pos & 0x3)
3291                 return -EINVAL;
3292
3293         while (size) {
3294                 uint32_t value;
3295
3296                 r = get_user(value, (uint32_t *)buf);
3297                 if (r)
3298                         return r;
3299
3300                 WREG32_PCIE(*pos >> 2, value);
3301
3302                 result += 4;
3303                 buf += 4;
3304                 *pos += 4;
3305                 size -= 4;
3306         }
3307
3308         return result;
3309 }
3310
3311 static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3312                                         size_t size, loff_t *pos)
3313 {
3314         struct amdgpu_device *adev = file_inode(f)->i_private;
3315         ssize_t result = 0;
3316         int r;
3317
3318         if (size & 0x3 || *pos & 0x3)
3319                 return -EINVAL;
3320
3321         while (size) {
3322                 uint32_t value;
3323
3324                 value = RREG32_DIDT(*pos >> 2);
3325                 r = put_user(value, (uint32_t *)buf);
3326                 if (r)
3327                         return r;
3328
3329                 result += 4;
3330                 buf += 4;
3331                 *pos += 4;
3332                 size -= 4;
3333         }
3334
3335         return result;
3336 }
3337
3338 static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3339                                          size_t size, loff_t *pos)
3340 {
3341         struct amdgpu_device *adev = file_inode(f)->i_private;
3342         ssize_t result = 0;
3343         int r;
3344
3345         if (size & 0x3 || *pos & 0x3)
3346                 return -EINVAL;
3347
3348         while (size) {
3349                 uint32_t value;
3350
3351                 r = get_user(value, (uint32_t *)buf);
3352                 if (r)
3353                         return r;
3354
3355                 WREG32_DIDT(*pos >> 2, value);
3356
3357                 result += 4;
3358                 buf += 4;
3359                 *pos += 4;
3360                 size -= 4;
3361         }
3362
3363         return result;
3364 }
3365
3366 static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3367                                         size_t size, loff_t *pos)
3368 {
3369         struct amdgpu_device *adev = file_inode(f)->i_private;
3370         ssize_t result = 0;
3371         int r;
3372
3373         if (size & 0x3 || *pos & 0x3)
3374                 return -EINVAL;
3375
3376         while (size) {
3377                 uint32_t value;
3378
3379                 value = RREG32_SMC(*pos);
3380                 r = put_user(value, (uint32_t *)buf);
3381                 if (r)
3382                         return r;
3383
3384                 result += 4;
3385                 buf += 4;
3386                 *pos += 4;
3387                 size -= 4;
3388         }
3389
3390         return result;
3391 }
3392
3393 static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3394                                          size_t size, loff_t *pos)
3395 {
3396         struct amdgpu_device *adev = file_inode(f)->i_private;
3397         ssize_t result = 0;
3398         int r;
3399
3400         if (size & 0x3 || *pos & 0x3)
3401                 return -EINVAL;
3402
3403         while (size) {
3404                 uint32_t value;
3405
3406                 r = get_user(value, (uint32_t *)buf);
3407                 if (r)
3408                         return r;
3409
3410                 WREG32_SMC(*pos, value);
3411
3412                 result += 4;
3413                 buf += 4;
3414                 *pos += 4;
3415                 size -= 4;
3416         }
3417
3418         return result;
3419 }
3420
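/*
 * amdgpu_gca_config dumps the GFX configuration as a versioned array of
 * dwords: config[0] carries the format revision (currently 3) and each
 * revision only appends fields, so older userspace keeps working.  A
 * minimal consumer sketch (error handling omitted; with this layout the
 * family id added in rev 2 sits at index 27):
 *
 *   uint32_t cfg[64];
 *   ssize_t n = pread(fd, cfg, sizeof(cfg), 0);
 *   if (n >= 4 && cfg[0] >= 2)
 *           printf("family %u\n", cfg[27]);
 */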
3421 static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
3422                                         size_t size, loff_t *pos)
3423 {
3424         struct amdgpu_device *adev = file_inode(f)->i_private;
3425         ssize_t result = 0;
3426         int r;
3427         uint32_t *config, no_regs = 0;
3428
3429         if (size & 0x3 || *pos & 0x3)
3430                 return -EINVAL;
3431
3432         config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
3433         if (!config)
3434                 return -ENOMEM;
3435
3436         /* version, increment each time something is added */
3437         config[no_regs++] = 3;
3438         config[no_regs++] = adev->gfx.config.max_shader_engines;
3439         config[no_regs++] = adev->gfx.config.max_tile_pipes;
3440         config[no_regs++] = adev->gfx.config.max_cu_per_sh;
3441         config[no_regs++] = adev->gfx.config.max_sh_per_se;
3442         config[no_regs++] = adev->gfx.config.max_backends_per_se;
3443         config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
3444         config[no_regs++] = adev->gfx.config.max_gprs;
3445         config[no_regs++] = adev->gfx.config.max_gs_threads;
3446         config[no_regs++] = adev->gfx.config.max_hw_contexts;
3447         config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
3448         config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
3449         config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
3450         config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
3451         config[no_regs++] = adev->gfx.config.num_tile_pipes;
3452         config[no_regs++] = adev->gfx.config.backend_enable_mask;
3453         config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
3454         config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
3455         config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
3456         config[no_regs++] = adev->gfx.config.num_gpus;
3457         config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
3458         config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
3459         config[no_regs++] = adev->gfx.config.gb_addr_config;
3460         config[no_regs++] = adev->gfx.config.num_rbs;
3461
3462         /* rev==1 */
3463         config[no_regs++] = adev->rev_id;
3464         config[no_regs++] = adev->pg_flags;
3465         config[no_regs++] = adev->cg_flags;
3466
3467         /* rev==2 */
3468         config[no_regs++] = adev->family;
3469         config[no_regs++] = adev->external_rev_id;
3470
3471         /* rev==3 */
3472         config[no_regs++] = adev->pdev->device;
3473         config[no_regs++] = adev->pdev->revision;
3474         config[no_regs++] = adev->pdev->subsystem_device;
3475         config[no_regs++] = adev->pdev->subsystem_vendor;
3476
3477         while (size && (*pos < no_regs * 4)) {
3478                 uint32_t value;
3479
3480                 value = config[*pos >> 2];
3481                 r = put_user(value, (uint32_t *)buf);
3482                 if (r) {
3483                         kfree(config);
3484                         return r;
3485                 }
3486
3487                 result += 4;
3488                 buf += 4;
3489                 *pos += 4;
3490                 size -= 4;
3491         }
3492
3493         kfree(config);
3494         return result;
3495 }
3496
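/*
 * amdgpu_sensors: the file position divided by four selects the sensor
 * index handed to the powerplay read_sensor hook (the AMDGPU_PP_SENSOR_*
 * space), and the result comes back as up to sixteen dwords.  Reads fail
 * unless dpm is enabled and the powerplay backend implements read_sensor.
 */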
3497 static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
3498                                         size_t size, loff_t *pos)
3499 {
3500         struct amdgpu_device *adev = file_inode(f)->i_private;
3501         int idx, x, outsize, r, valuesize;
3502         uint32_t values[16];
3503
3504         if (size & 0x3 || *pos & 0x3)
3505                 return -EINVAL;
3506
3507         if (amdgpu_dpm == 0)
3508                 return -EINVAL;
3509
3510         /* convert offset to sensor number */
3511         idx = *pos >> 2;
3512
3513         valuesize = sizeof(values);
3514         if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
3515                 r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
3516         else
3517                 return -EINVAL;
3518
3519         if (size > valuesize)
3520                 return -EINVAL;
3521
3522         outsize = 0;
3523         x = 0;
3524         if (!r) {
3525                 while (size) {
3526                         r = put_user(values[x++], (int32_t *)buf);
3527                         buf += 4;
3528                         size -= 4;
3529                         outsize += 4;
3530                 }
3531         }
3532
3533         return !r ? outsize : r;
3534 }
3535
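/*
 * amdgpu_wave: dump the status words of a single wavefront.  The file
 * position packs the selector fields:
 *
 *   bits [6:0]   dword offset into the returned data
 *   bits [14:7]  SE, [22:15] SH, [30:23] CU
 *   bits [36:31] wave, [44:37] SIMD
 *
 * The ASIC-specific read_wave_data() callback fills data[] and returns
 * the number of valid dwords through x.
 */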
3536 static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
3537                                         size_t size, loff_t *pos)
3538 {
3539         struct amdgpu_device *adev = f->f_inode->i_private;
3540         int r, x;
3541         ssize_t result = 0;
3542         uint32_t offset, se, sh, cu, wave, simd, data[32];
3543
3544         if (size & 3 || *pos & 3)
3545                 return -EINVAL;
3546
3547         /* decode offset */
3548         offset = (*pos & 0x7F);
3549         se = ((*pos >> 7) & 0xFF);
3550         sh = ((*pos >> 15) & 0xFF);
3551         cu = ((*pos >> 23) & 0xFF);
3552         wave = ((*pos >> 31) & 0x3F); /* 6 bits, disjoint from the simd field at bit 37 */
3553         simd = ((*pos >> 37) & 0xFF);
3554
3555         /* switch to the specific se/sh/cu */
3556         mutex_lock(&adev->grbm_idx_mutex);
3557         amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3558
3559         x = 0;
3560         if (adev->gfx.funcs->read_wave_data)
3561                 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
3562
3563         amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3564         mutex_unlock(&adev->grbm_idx_mutex);
3565
3566         if (!x)
3567                 return -EINVAL;
3568
3569         while (size && (offset < x * 4)) {
3570                 uint32_t value;
3571
3572                 value = data[offset >> 2];
3573                 r = put_user(value, (uint32_t *)buf);
3574                 if (r)
3575                         return r;
3576
3577                 result += 4;
3578                 buf += 4;
3579                 offset += 4;
3580                 size -= 4;
3581         }
3582
3583         return result;
3584 }
3585
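/*
 * amdgpu_gpr: dump the general-purpose registers of one wave.  File
 * position layout:
 *
 *   bits [11:0]  dword offset into the GPR file
 *   bits [19:12] SE, [27:20] SH, [35:28] CU
 *   bits [43:36] wave, [51:44] SIMD, [59:52] thread
 *   bit  60      bank: 0 reads VGPRs (per thread), 1 reads SGPRs
 *
 * The staging buffer below holds 1024 dwords, so offset plus the
 * requested dword count must stay within that bound.
 */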
3586 static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3587                                         size_t size, loff_t *pos)
3588 {
3589         struct amdgpu_device *adev = f->f_inode->i_private;
3590         int r;
3591         ssize_t result = 0;
3592         uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
3593
3594         if (size & 3 || *pos & 3)
3595                 return -EINVAL;
3596
3597         /* decode offset */
3598         offset = (*pos & 0xFFF);       /* in dwords */
3599         se = ((*pos >> 12) & 0xFF);
3600         sh = ((*pos >> 20) & 0xFF);
3601         cu = ((*pos >> 28) & 0xFF);
3602         wave = ((*pos >> 36) & 0xFF);
3603         simd = ((*pos >> 44) & 0xFF);
3604         thread = ((*pos >> 52) & 0xFF);
3605         bank = ((*pos >> 60) & 1);
3606
3607         data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3608         if (!data)
3609                 return -ENOMEM;
3610
3611         /* switch to the specific se/sh/cu */
3612         mutex_lock(&adev->grbm_idx_mutex);
3613         amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3614
3615         if (bank == 0) {
3616                 if (adev->gfx.funcs->read_wave_vgprs)
3617                         adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size >> 2, data);
3618         } else {
3619                 if (adev->gfx.funcs->read_wave_sgprs)
3620                         adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size >> 2, data);
3621         }
3622
3623         amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3624         mutex_unlock(&adev->grbm_idx_mutex);
3625
3626         while (size) {
3627                 uint32_t value;
3628
3629                 value = data[offset++];
3630                 r = put_user(value, (uint32_t *)buf);
3631                 if (r) {
3632                         result = r;
3633                         goto err;
3634                 }
3635
3636                 result += 4;
3637                 buf += 4;
3638                 size -= 4;
3639         }
3640
3641 err:
3642         kfree(data);
3643         return result;
3644 }
3645
3646 static const struct file_operations amdgpu_debugfs_regs_fops = {
3647         .owner = THIS_MODULE,
3648         .read = amdgpu_debugfs_regs_read,
3649         .write = amdgpu_debugfs_regs_write,
3650         .llseek = default_llseek
3651 };
3652 static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3653         .owner = THIS_MODULE,
3654         .read = amdgpu_debugfs_regs_didt_read,
3655         .write = amdgpu_debugfs_regs_didt_write,
3656         .llseek = default_llseek
3657 };
3658 static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3659         .owner = THIS_MODULE,
3660         .read = amdgpu_debugfs_regs_pcie_read,
3661         .write = amdgpu_debugfs_regs_pcie_write,
3662         .llseek = default_llseek
3663 };
3664 static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3665         .owner = THIS_MODULE,
3666         .read = amdgpu_debugfs_regs_smc_read,
3667         .write = amdgpu_debugfs_regs_smc_write,
3668         .llseek = default_llseek
3669 };
3670
3671 static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3672         .owner = THIS_MODULE,
3673         .read = amdgpu_debugfs_gca_config_read,
3674         .llseek = default_llseek
3675 };
3676
3677 static const struct file_operations amdgpu_debugfs_sensors_fops = {
3678         .owner = THIS_MODULE,
3679         .read = amdgpu_debugfs_sensor_read,
3680         .llseek = default_llseek
3681 };
3682
3683 static const struct file_operations amdgpu_debugfs_wave_fops = {
3684         .owner = THIS_MODULE,
3685         .read = amdgpu_debugfs_wave_read,
3686         .llseek = default_llseek
3687 };
3688 static const struct file_operations amdgpu_debugfs_gpr_fops = {
3689         .owner = THIS_MODULE,
3690         .read = amdgpu_debugfs_gpr_read,
3691         .llseek = default_llseek
3692 };
3693
3694 static const struct file_operations *debugfs_regs[] = {
3695         &amdgpu_debugfs_regs_fops,
3696         &amdgpu_debugfs_regs_didt_fops,
3697         &amdgpu_debugfs_regs_pcie_fops,
3698         &amdgpu_debugfs_regs_smc_fops,
3699         &amdgpu_debugfs_gca_config_fops,
3700         &amdgpu_debugfs_sensors_fops,
3701         &amdgpu_debugfs_wave_fops,
3702         &amdgpu_debugfs_gpr_fops,
3703 };
3704
3705 static const char *debugfs_regs_names[] = {
3706         "amdgpu_regs",
3707         "amdgpu_regs_didt",
3708         "amdgpu_regs_pcie",
3709         "amdgpu_regs_smc",
3710         "amdgpu_gca_config",
3711         "amdgpu_sensors",
3712         "amdgpu_wave",
3713         "amdgpu_gpr",
3714 };
3715
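/*
 * Create one debugfs file per entry of debugfs_regs[] under the DRM
 * minor's debugfs root (/sys/kernel/debug/dri/<minor>/), unwinding the
 * files already created if any creation fails.  The first file,
 * amdgpu_regs, has its inode size set to the MMIO aperture size so tools
 * can tell how much register space is addressable.
 */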
3716 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3717 {
3718         struct drm_minor *minor = adev->ddev->primary;
3719         struct dentry *ent, *root = minor->debugfs_root;
3720         unsigned i, j;
3721
3722         for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3723                 ent = debugfs_create_file(debugfs_regs_names[i],
3724                                           S_IFREG | S_IRUGO, root,
3725                                           adev, debugfs_regs[i]);
3726                 if (IS_ERR(ent)) {
3727                         for (j = 0; j < i; j++) {
3728                                 debugfs_remove(adev->debugfs_regs[j]);
3729                                 adev->debugfs_regs[j] = NULL;
3730                         }
3731                         return PTR_ERR(ent);
3732                 }
3733
3734                 if (!i)
3735                         i_size_write(ent->d_inode, adev->rmmio_size);
3736                 adev->debugfs_regs[i] = ent;
3737         }
3738
3739         return 0;
3740 }
3741
3742 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3743 {
3744         unsigned i;
3745
3746         for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3747                 if (adev->debugfs_regs[i]) {
3748                         debugfs_remove(adev->debugfs_regs[i]);
3749                         adev->debugfs_regs[i] = NULL;
3750                 }
3751         }
3752 }
3753
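/*
 * amdgpu_test_ib: reading this file (e.g. cat
 * /sys/kernel/debug/dri/0/amdgpu_test_ib) parks every scheduler thread
 * so no new jobs are picked up, runs the IB ring tests, then unparks the
 * threads and reports pass/fail through the seq_file.
 */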
3754 static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
3755 {
3756         struct drm_info_node *node = (struct drm_info_node *) m->private;
3757         struct drm_device *dev = node->minor->dev;
3758         struct amdgpu_device *adev = dev->dev_private;
3759         int r = 0, i;
3760
3761         /* park the scheduler threads */
3762         for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3763                 struct amdgpu_ring *ring = adev->rings[i];
3764
3765                 if (!ring || !ring->sched.thread)
3766                         continue;
3767                 kthread_park(ring->sched.thread);
3768         }
3769
3770         seq_puts(m, "run ib test:\n");
3771         r = amdgpu_ib_ring_tests(adev);
3772         if (r)
3773                 seq_printf(m, "ib ring tests failed (%d).\n", r);
3774         else
3775                 seq_puts(m, "ib ring tests passed.\n");
3776
3777         /* restart the scheduler threads */
3778         for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3779                 struct amdgpu_ring *ring = adev->rings[i];
3780
3781                 if (!ring || !ring->sched.thread)
3782                         continue;
3783                 kthread_unpark(ring->sched.thread);
3784         }
3785
3786         return 0;
3787 }
3788
3789 static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
3790         {"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
3791 };
3792
3793 static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
3794 {
3795         return amdgpu_debugfs_add_files(adev,
3796                                         amdgpu_debugfs_test_ib_ring_list, 1);
3797 }
3798
3799 int amdgpu_debugfs_init(struct drm_minor *minor)
3800 {
3801         return 0;
3802 }
3803
3804 static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
3805 {
3806         struct drm_info_node *node = (struct drm_info_node *) m->private;
3807         struct drm_device *dev = node->minor->dev;
3808         struct amdgpu_device *adev = dev->dev_private;
3809
3810         seq_write(m, adev->bios, adev->bios_size);
3811         return 0;
3812 }
3813
3814 static const struct drm_info_list amdgpu_vbios_dump_list[] = {
3815                 {"amdgpu_vbios",
3816                  amdgpu_debugfs_get_vbios_dump,
3817                  0, NULL},
3818 };
3819
3820 static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
3821 {
3822         return amdgpu_debugfs_add_files(adev,
3823                                         amdgpu_vbios_dump_list, 1);
3824 }
3825 #else
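/* CONFIG_DEBUG_FS disabled: provide no-op stubs so callers need no #ifdefs. */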
3826 static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
3827 {
3828         return 0;
3829 }
3830 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3831 {
3832         return 0;
3833 }
3834 static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
3835 {
3836         return 0;
3837 }
3838 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
3839 #endif