/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
#include <linux/pci-p2pdma.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");

#define AMDGPU_RESUME_MS                2000
#define AMDGPU_MAX_RETRY_LIMIT          2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)

const char *amdgpu_asic_name[] = {
        "TAHITI",
        "PITCAIRN",
        "VERDE",
        "OLAND",
        "HAINAN",
        "BONAIRE",
        "KAVERI",
        "KABINI",
        "HAWAII",
        "MULLINS",
        "TOPAZ",
        "TONGA",
        "FIJI",
        "CARRIZO",
        "STONEY",
        "POLARIS10",
        "POLARIS11",
        "POLARIS12",
        "VEGAM",
        "VEGA10",
        "VEGA12",
        "VEGA20",
        "RAVEN",
        "ARCTURUS",
        "RENOIR",
        "ALDEBARAN",
        "NAVI10",
        "CYAN_SKILLFISH",
        "NAVI14",
        "NAVI12",
        "SIENNA_CICHLID",
        "NAVY_FLOUNDER",
        "VANGOGH",
        "DIMGREY_CAVEFISH",
        "BEIGE_GOBY",
        "YELLOW_CARP",
        "IP DISCOVERY",
        "LAST",
};

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as the sum of the NAKs generated and NAKs received.
 */

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

        return sysfs_emit(buf, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
                amdgpu_device_get_pcie_replay_count, NULL);

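/*
 * Usage sketch (illustrative, not part of the original file): the attribute
 * above appears as a plain sysfs file on the PCI device, so user space can
 * read the replay count without any driver-specific ioctls. A minimal
 * userspace reader, assuming the GPU is card0:
 *
 *      #include <stdio.h>
 *
 *      int main(void)
 *      {
 *              unsigned long long cnt;
 *              FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");
 *
 *              if (!f || fscanf(f, "%llu", &cnt) != 1)
 *                      return 1;
 *              printf("PCIe replays: %llu\n", cnt);
 *              fclose(f);
 *              return 0;
 *      }
 */
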
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * DOC: product_name
 *
 * The amdgpu driver provides a sysfs API for reporting the product name
 * for the device.
 * The file product_name is used for this and returns the product name
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_name(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%s\n", adev->product_name);
}

static DEVICE_ATTR(product_name, S_IRUGO,
                amdgpu_device_get_product_name, NULL);

/**
 * DOC: product_number
 *
 * The amdgpu driver provides a sysfs API for reporting the part number
 * for the device.
 * The file product_number is used for this and returns the part number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_number(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%s\n", adev->product_number);
}

static DEVICE_ATTR(product_number, S_IRUGO,
                amdgpu_device_get_product_number, NULL);

/**
 * DOC: serial_number
 *
 * The amdgpu driver provides a sysfs API for reporting the serial number
 * for the device.
 * The file serial_number is used for this and returns the serial number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_serial_number(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%s\n", adev->serial);
}

static DEVICE_ATTR(serial_number, S_IRUGO,
                amdgpu_device_get_serial_number, NULL);

/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
                return true;
        return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        if (adev->has_pr3 ||
            ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
                return true;
        return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise returns false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        return amdgpu_asic_supports_baco(adev);
}

/**
 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
 * smart shift support
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
        return (amdgpu_device_supports_boco(dev) &&
                amdgpu_acpi_is_power_shift_control_supported());
}

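/*
 * Illustrative sketch (not from the original file): how a caller might pick
 * a runtime power-off strategy from the helpers above. The real policy lives
 * elsewhere in the driver; this only shows the intended precedence of
 * PX/BOCO/BACO, and example_runpm_mode() is a hypothetical name.
 *
 *      static const char *example_runpm_mode(struct drm_device *dev)
 *      {
 *              if (amdgpu_device_supports_px(dev))
 *                      return "ATPX";  // dGPU power controlled via ATPX
 *              if (amdgpu_device_supports_boco(dev))
 *                      return "BOCO";  // ACPI power resources (_PR3)
 *              if (amdgpu_device_supports_baco(dev))
 *                      return "BACO";  // bus stays active, chip powers off
 *              return "none";
 *      }
 */
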
/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
                             void *buf, size_t size, bool write)
{
        unsigned long flags;
        uint32_t hi = ~0, tmp = 0;
        uint32_t *data = buf;
        uint64_t last;
        int idx;

        if (!drm_dev_enter(adev_to_drm(adev), &idx))
                return;

        BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

        spin_lock_irqsave(&adev->mmio_idx_lock, flags);
        for (last = pos + size; pos < last; pos += 4) {
                tmp = pos >> 31;

                WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
                if (tmp != hi) {
                        WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
                        hi = tmp;
                }
                if (write)
                        WREG32_NO_KIQ(mmMM_DATA, *data++);
                else
                        *data++ = RREG32_NO_KIQ(mmMM_DATA);
        }

        spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
        drm_dev_exit(idx);
}

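/*
 * Protocol sketch (illustrative, not part of the original file): a single
 * 4-byte VRAM read at offset pos through the MM window, as the loop above
 * performs it for each dword:
 *
 *      WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);  // low 31 bits + aperture bit
 *      WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);                  // upper bits of the offset
 *      val = RREG32_NO_KIQ(mmMM_DATA);                           // data window
 */
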
/**
 * amdgpu_device_aper_access - access vram by vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 *
 * Returns the number of bytes that have been transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
                                 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
        void __iomem *addr;
        size_t count = 0;
        uint64_t last;

        if (!adev->mman.aper_base_kaddr)
                return 0;

        last = min(pos + size, adev->gmc.visible_vram_size);
        if (last > pos) {
                addr = adev->mman.aper_base_kaddr + pos;
                count = last - pos;

                if (write) {
                        memcpy_toio(addr, buf, count);
                        mb();
                        amdgpu_device_flush_hdp(adev, NULL);
                } else {
                        amdgpu_device_invalidate_hdp(adev, NULL);
                        mb();
                        memcpy_fromio(buf, addr, count);
                }
        }

        return count;
#else
        return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
                               void *buf, size_t size, bool write)
{
        size_t count;

        /* try using the VRAM aperture to access VRAM first */
        count = amdgpu_device_aper_access(adev, pos, buf, size, write);
        size -= count;
        if (size) {
                /* fall back to MM_INDEX/MM_DATA for the rest */
                pos += count;
                buf += count;
                amdgpu_device_mm_access(adev, pos, buf, size, write);
        }
}

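/*
 * Usage sketch (illustrative only): zero-fill 256 bytes of VRAM starting at
 * pos. amdgpu_device_vram_access() transparently uses the CPU-visible
 * aperture where possible and falls back to the MMIO window for whatever
 * lies beyond visible VRAM.
 *
 *      u8 zeros[256] = {};
 *
 *      amdgpu_device_vram_access(adev, pos, zeros, sizeof(zeros), true);
 */
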
/*
 * Register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
        if (adev->no_hw_access)
                return true;

#ifdef CONFIG_LOCKDEP
        /*
         * This is a bit complicated to understand, so worth a comment. What we assert
         * here is that the GPU reset is not running on another thread in parallel.
         *
         * For this we trylock the read side of the reset semaphore, if that succeeds
         * we know that the reset is not running in parallel.
         *
         * If the trylock fails we assert that we are either already holding the read
         * side of the lock or are the reset thread itself and hold the write side of
         * the lock.
         */
        if (in_task()) {
                if (down_read_trylock(&adev->reset_domain->sem))
                        up_read(&adev->reset_domain->sem);
                else
                        lockdep_assert_held(&adev->reset_domain->sem);
        }
#endif
        return false;
}

/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
                            uint32_t reg, uint32_t acc_flags)
{
        uint32_t ret;

        if (amdgpu_device_skip_hw_access(adev))
                return 0;

        if ((reg * 4) < adev->rmmio_size) {
                if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
                    amdgpu_sriov_runtime(adev) &&
                    down_read_trylock(&adev->reset_domain->sem)) {
                        ret = amdgpu_kiq_rreg(adev, reg);
                        up_read(&adev->reset_domain->sem);
                } else {
                        ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
                }
        } else {
                ret = adev->pcie_rreg(adev, reg * 4);
        }

        trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

        return ret;
}

/*
 * MMIO register read with bytes helper function
 * @offset: byte offset from the start of the MMIO space
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
        if (amdgpu_device_skip_hw_access(adev))
                return 0;

        if (offset < adev->rmmio_size)
                return (readb(adev->rmmio + offset));
        BUG();
}

/*
 * MMIO register write with bytes helper function
 * @offset: byte offset from the start of the MMIO space
 * @value: the value to be written to the register
 */

/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if (offset < adev->rmmio_size)
                writeb(value, adev->rmmio + offset);
        else
                BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
                        uint32_t reg, uint32_t v,
                        uint32_t acc_flags)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if ((reg * 4) < adev->rmmio_size) {
                if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
                    amdgpu_sriov_runtime(adev) &&
                    down_read_trylock(&adev->reset_domain->sem)) {
                        amdgpu_kiq_wreg(adev, reg, v);
                        up_read(&adev->reset_domain->sem);
                } else {
                        writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
                }
        } else {
                adev->pcie_wreg(adev, reg * 4, v);
        }

        trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}

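/*
 * Usage sketch (illustrative only): a read-modify-write through the two
 * helpers above, using the RREG32()/WREG32() convenience macros that wrap
 * amdgpu_device_rreg()/amdgpu_device_wreg() (the same macros are used by
 * amdgpu_device_program_register_sequence() further down):
 *
 *      u32 tmp = RREG32(reg);
 *
 *      tmp &= ~mask;
 *      tmp |= value & mask;
 *      WREG32(reg, tmp);
 */
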
/**
 * amdgpu_mm_wreg_mmio_rlc - write a register either with direct/indirect mmio or with the RLC path if in range
 *
 * @adev: amdgpu_device pointer
 * @reg: mmio/rlc register
 * @v: value to write
 *
 * This function is invoked only for debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
                             uint32_t reg, uint32_t v)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if (amdgpu_sriov_fullaccess(adev) &&
            adev->gfx.rlc.funcs &&
            adev->gfx.rlc.funcs->is_rlcg_access_range) {
                if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
                        return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
        } else if ((reg * 4) >= adev->rmmio_size) {
                adev->pcie_wreg(adev, reg * 4, v);
        } else {
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        }
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
        if (amdgpu_device_skip_hw_access(adev))
                return 0;

        if (index < adev->doorbell.num_doorbells) {
                return readl(adev->doorbell.ptr + index);
        } else {
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
                return 0;
        }
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if (index < adev->doorbell.num_doorbells) {
                writel(v, adev->doorbell.ptr + index);
        } else {
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
        }
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
        if (amdgpu_device_skip_hw_access(adev))
                return 0;

        if (index < adev->doorbell.num_doorbells) {
                return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
        } else {
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
                return 0;
        }
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if (index < adev->doorbell.num_doorbells) {
                atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
        } else {
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
        }
}

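/*
 * Usage sketch (illustrative only): how ring code typically commits its
 * write pointer to the hardware through the helpers above, assuming the
 * ring has a doorbell assigned:
 *
 *      if (ring->use_doorbell)
 *              amdgpu_mm_wdoorbell64(adev, ring->doorbell_index, ring->wptr);
 */
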
/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
                                u32 pcie_index, u32 pcie_data,
                                u32 reg_addr)
{
        unsigned long flags;
        u32 r;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        r = readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64-bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
                                  u32 pcie_index, u32 pcie_data,
                                  u32 reg_addr)
{
        unsigned long flags;
        u64 r;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        /* read low 32 bits */
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        r = readl(pcie_data_offset);
        /* read high 32 bits */
        writel(reg_addr + 4, pcie_index_offset);
        readl(pcie_index_offset);
        r |= ((u64)readl(pcie_data_offset) << 32);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
}

/**
 * amdgpu_device_indirect_wreg - write to an indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
                                 u32 pcie_index, u32 pcie_data,
                                 u32 reg_addr, u32 reg_data)
{
        unsigned long flags;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        writel(reg_data, pcie_data_offset);
        readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_indirect_wreg64 - write to a 64-bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
                                   u32 pcie_index, u32 pcie_data,
                                   u32 reg_addr, u64 reg_data)
{
        unsigned long flags;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        /* write low 32 bits */
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
        readl(pcie_data_offset);
        /* write high 32 bits */
        writel(reg_addr + 4, pcie_index_offset);
        readl(pcie_index_offset);
        writel((u32)(reg_data >> 32), pcie_data_offset);
        readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

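/*
 * Usage sketch (illustrative only, example_pcie_rreg() is a hypothetical
 * name): ASIC code typically wraps these helpers with the index/data
 * register pair reported by its NBIO block, roughly:
 *
 *      static u32 example_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *      {
 *              u32 index = adev->nbio.funcs->get_pcie_index_offset(adev);
 *              u32 data = adev->nbio.funcs->get_pcie_data_offset(adev);
 *
 *              return amdgpu_device_indirect_rreg(adev, index, data, reg);
 *      }
 */
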
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
        BUG();
        return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
                  reg, v);
        BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
        BUG();
        return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
        DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
                  reg, v);
        BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
                                          uint32_t block, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
                  reg, block);
        BUG();
        return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
                                      uint32_t block,
                                      uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
                  reg, block, v);
        BUG();
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
        amdgpu_asic_pre_asic_init(adev);

        if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
                return amdgpu_atomfirmware_asic_init(adev, true);
        else
                return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
        return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
                                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
                                       &adev->vram_scratch.robj,
                                       &adev->vram_scratch.gpu_addr,
                                       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
                                             const u32 *registers,
                                             const u32 array_size)
{
        u32 tmp, reg, and_mask, or_mask;
        int i;

        if (array_size % 3)
                return;

        for (i = 0; i < array_size; i += 3) {
                reg = registers[i + 0];
                and_mask = registers[i + 1];
                or_mask = registers[i + 2];

                if (and_mask == 0xffffffff) {
                        tmp = or_mask;
                } else {
                        tmp = RREG32(reg);
                        tmp &= ~and_mask;
                        if (adev->family >= AMDGPU_FAMILY_AI)
                                tmp |= (or_mask & and_mask);
                        else
                                tmp |= or_mask;
                }
                WREG32(reg, tmp);
        }
}

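/*
 * Usage sketch (illustrative only): the register array is consumed as
 * (offset, and_mask, or_mask) triplets. A hypothetical two-entry golden
 * setting (mmREG_A and mmREG_B are made-up register names):
 *
 *      static const u32 example_golden_settings[] = {
 *              mmREG_A, 0x0000000f, 0x00000004,  // RMW: clear the low nibble, then OR in 0x4
 *              mmREG_B, 0xffffffff, 0x12345678,  // AND mask of all 1s: direct write
 *      };
 *
 *      amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *                                              ARRAY_SIZE(example_golden_settings));
 */
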
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
        pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
        return pci_reset_function(adev->pdev);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
        /* No doorbell on SI hardware generation */
        if (adev->asic_type < CHIP_BONAIRE) {
                adev->doorbell.base = 0;
                adev->doorbell.size = 0;
                adev->doorbell.num_doorbells = 0;
                adev->doorbell.ptr = NULL;
                return 0;
        }

        if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
                return -EINVAL;

        amdgpu_asic_init_doorbell_index(adev);

        /* doorbell bar mapping */
        adev->doorbell.base = pci_resource_start(adev->pdev, 2);
        adev->doorbell.size = pci_resource_len(adev->pdev, 2);

        if (adev->enable_mes) {
                adev->doorbell.num_doorbells =
                        adev->doorbell.size / sizeof(u32);
        } else {
                adev->doorbell.num_doorbells =
                        min_t(u32, adev->doorbell.size / sizeof(u32),
                              adev->doorbell_index.max_assignment + 1);
                if (adev->doorbell.num_doorbells == 0)
                        return -EINVAL;

                /* For Vega, reserve and map two pages on the doorbell BAR since the
                 * SDMA paging queue doorbell uses the second page. The
                 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
                 * doorbells are in the first page, so with the paging queue enabled
                 * num_doorbells is extended by one page (0x400 dwords).
                 */
                if (adev->asic_type >= CHIP_VEGA10)
                        adev->doorbell.num_doorbells += 0x400;
        }

        adev->doorbell.ptr = ioremap(adev->doorbell.base,
                                     adev->doorbell.num_doorbells *
                                     sizeof(u32));
        if (adev->doorbell.ptr == NULL)
                return -ENOMEM;

        return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
        iounmap(adev->doorbell.ptr);
        adev->doorbell.ptr = NULL;
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
        if (adev->wb.wb_obj) {
                amdgpu_bo_free_kernel(&adev->wb.wb_obj,
                                      &adev->wb.gpu_addr,
                                      (void **)&adev->wb.wb);
                adev->wb.wb_obj = NULL;
        }
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->wb.wb_obj == NULL) {
                /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256-bit slots */
                r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
                                            PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                            &adev->wb.wb_obj, &adev->wb.gpu_addr,
                                            (void **)&adev->wb.wb);
                if (r) {
                        dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
                        return r;
                }

                adev->wb.num_wb = AMDGPU_MAX_WB;
                memset(&adev->wb.used, 0, sizeof(adev->wb.used));

                /* clear wb memory */
                memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
        }

        return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
        unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

        if (offset < adev->wb.num_wb) {
                __set_bit(offset, adev->wb.used);
                *wb = offset << 3; /* convert to dw offset */
                return 0;
        } else {
                return -EINVAL;
        }
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
        wb >>= 3;
        if (wb < adev->wb.num_wb)
                __clear_bit(wb, adev->wb.used);
}

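/*
 * Usage sketch (illustrative only): a typical allocate/use/free cycle for a
 * writeback slot. The returned index is in dwords (each 256-bit slot is 8
 * dwords, hence the << 3 above), so the GPU address of a slot is
 * adev->wb.gpu_addr + index * 4.
 *
 *      u32 wb;
 *
 *      if (amdgpu_device_wb_get(adev, &wb) == 0) {
 *              u64 gpu_addr = adev->wb.gpu_addr + (u64)wb * 4;  // told to the GPU
 *              u32 val = adev->wb.wb[wb];                       // CPU-side view
 *
 *              amdgpu_device_wb_free(adev, wb);
 *      }
 */
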
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
        int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
        struct pci_bus *root;
        struct resource *res;
        unsigned i;
        u16 cmd;
        int r;

        /* Bypass for VF */
        if (amdgpu_sriov_vf(adev))
                return 0;

        /* skip if the bios has already enabled large BAR */
        if (adev->gmc.real_vram_size &&
            (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
                return 0;

        /* Check if the root bus has 64-bit memory resources */
        root = adev->pdev->bus;
        while (root->parent)
                root = root->parent;

        pci_bus_for_each_resource(root, res, i) {
                if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
                    res->start > 0x100000000ull)
                        break;
        }

        /* Trying to resize is pointless without a root hub window above 4GB */
        if (!res)
                return 0;

        /* Limit the BAR size to what is available */
        rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
                        rbar_size);

        /* Disable memory decoding while we change the BAR addresses and size */
        pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
        pci_write_config_word(adev->pdev, PCI_COMMAND,
                              cmd & ~PCI_COMMAND_MEMORY);

        /* Free the VRAM and doorbell BAR, we most likely need to move both. */
        amdgpu_device_doorbell_fini(adev);
        if (adev->asic_type >= CHIP_BONAIRE)
                pci_release_resource(adev->pdev, 2);

        pci_release_resource(adev->pdev, 0);

        r = pci_resize_resource(adev->pdev, 0, rbar_size);
        if (r == -ENOSPC)
                DRM_INFO("Not enough PCI address space for a large BAR.");
        else if (r && r != -ENOTSUPP)
                DRM_ERROR("Problem resizing BAR0 (%d).", r);

        pci_assign_unassigned_bus_resources(adev->pdev->bus);

        /* When the doorbell or fb BAR isn't available we have no chance of
         * using the device.
         */
        r = amdgpu_device_doorbell_init(adev);
        if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
                return -ENODEV;

        pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

        return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup,
 * or if post is needed because a hw reset was performed.
 * Returns true if need or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
        uint32_t reg;

        if (amdgpu_sriov_vf(adev))
                return false;

        if (amdgpu_passthrough(adev)) {
                /* For FIJI: in the whole-GPU pass-through virtualization case, after
                 * a VM reboot some old SMC firmware still needs the driver to do a
                 * vPost, otherwise the GPU hangs. SMC firmware versions above 22.15
                 * don't have this flaw, so force vPost for versions below 22.15.
                 */
                if (adev->asic_type == CHIP_FIJI) {
                        int err;
                        uint32_t fw_ver;

                        err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
                        /* force vPost if an error occurred */
                        if (err)
                                return true;

                        fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
                        if (fw_ver < 0x00160e00)
                                return true;
                }
        }

        /* Don't post if we need to reset the whole hive on init */
        if (adev->gmc.xgmi.pending_reset)
                return false;

        if (adev->has_hw_reset) {
                adev->has_hw_reset = false;
                return true;
        }

        /* bios scratch used on CIK+ */
        if (adev->asic_type >= CHIP_BONAIRE)
                return amdgpu_atombios_scratch_need_asic_init(adev);

        /* check MEM_SIZE for older asics */
        reg = amdgpu_asic_get_config_memsize(adev);

        if ((reg != 0) && (reg != 0xffffffff))
                return false;

        return true;
}

/**
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 *
 * @adev: amdgpu_device pointer
 *
 * Confirm whether the module parameter and PCIe bridge agree that ASPM should
 * be set for this device.
 *
 * Returns true if it should be used or false if not.
 */
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
{
        switch (amdgpu_aspm) {
        case -1:
                break;
        case 0:
                return false;
        case 1:
                return true;
        default:
                return false;
        }
        return pcie_aspm_enabled(adev->pdev);
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
                bool state)
{
        struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

        amdgpu_asic_set_vga_state(adev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in the page table versus the
 * page directory: a page is 4KB so we have 12 bits of offset, a minimum of
 * 9 bits in the page table, and the remaining bits in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
        /* defines the number of bits in page table versus page directory,
         * a page is 4KB so we have 12 bits of offset, minimum 9 bits in the
         * page table and the remaining bits are in the page directory */
        if (amdgpu_vm_block_size == -1)
                return;

        if (amdgpu_vm_block_size < 9) {
                dev_warn(adev->dev, "VM page table size (%d) too small\n",
                         amdgpu_vm_block_size);
                amdgpu_vm_block_size = -1;
        }
}

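/*
 * Worked example (illustrative only): with a 4KB page (12 offset bits) and
 * amdgpu_vm_block_size = 9, a single page table covers 2^(12 + 9) = 2MB of
 * virtual address space, which is why 9 is the minimum accepted above.
 */
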
/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
        /* no need to check the default value */
        if (amdgpu_vm_size == -1)
                return;

        if (amdgpu_vm_size < 1) {
                dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
                         amdgpu_vm_size);
                amdgpu_vm_size = -1;
        }
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
        struct sysinfo si;
        bool is_os_64 = (sizeof(void *) == 8);
        uint64_t total_memory;
        uint64_t dram_size_seven_GB = 0x1B8000000;
        uint64_t dram_size_three_GB = 0xB8000000;

        if (amdgpu_smu_memory_pool_size == 0)
                return;

        if (!is_os_64) {
                DRM_WARN("Not 64-bit OS, feature not supported\n");
                goto def_value;
        }
        si_meminfo(&si);
        total_memory = (uint64_t)si.totalram * si.mem_unit;

        if ((amdgpu_smu_memory_pool_size == 1) ||
                (amdgpu_smu_memory_pool_size == 2)) {
                if (total_memory < dram_size_three_GB)
                        goto def_value1;
        } else if ((amdgpu_smu_memory_pool_size == 4) ||
                (amdgpu_smu_memory_pool_size == 8)) {
                if (total_memory < dram_size_seven_GB)
                        goto def_value1;
        } else {
                DRM_WARN("Smu memory pool size not supported\n");
                goto def_value;
        }
        adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

        return;

def_value1:
        DRM_WARN("Not enough system memory\n");
def_value:
        adev->pm.smu_prv_buffer_size = 0;
}

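/*
 * Worked example (illustrative only): the shift by 28 above converts the
 * module parameter to bytes in 256MB units, so amdgpu_smu_memory_pool_size =
 * 1, 2, 4 or 8 yields a 256MB, 512MB, 1GB or 2GB pool. The 3GB and 7GB
 * system RAM thresholds gate the smaller and larger pool sizes respectively.
 */
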
static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
{
        if (!(adev->flags & AMD_IS_APU) ||
            adev->asic_type < CHIP_RAVEN)
                return 0;

        switch (adev->asic_type) {
        case CHIP_RAVEN:
                if (adev->pdev->device == 0x15dd)
                        adev->apu_flags |= AMD_APU_IS_RAVEN;
                if (adev->pdev->device == 0x15d8)
                        adev->apu_flags |= AMD_APU_IS_PICASSO;
                break;
        case CHIP_RENOIR:
                if ((adev->pdev->device == 0x1636) ||
                    (adev->pdev->device == 0x164c))
                        adev->apu_flags |= AMD_APU_IS_RENOIR;
                else
                        adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
                break;
        case CHIP_VANGOGH:
                adev->apu_flags |= AMD_APU_IS_VANGOGH;
                break;
        case CHIP_YELLOW_CARP:
                break;
        case CHIP_CYAN_SKILLFISH:
                if ((adev->pdev->device == 0x13FE) ||
                    (adev->pdev->device == 0x143F))
                        adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
                break;
        default:
                break;
        }

        return 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
        if (amdgpu_sched_jobs < 4) {
                dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = 4;
        } else if (!is_power_of_2(amdgpu_sched_jobs)) {
                dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
        }

        if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
                /* gart size must be greater than or equal to 32M */
                dev_warn(adev->dev, "gart size (%d) too small\n",
                         amdgpu_gart_size);
                amdgpu_gart_size = -1;
        }

        if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
                /* gtt size must be greater than or equal to 32M */
                dev_warn(adev->dev, "gtt size (%d) too small\n",
                         amdgpu_gtt_size);
                amdgpu_gtt_size = -1;
        }

        /* valid range is between 4 and 9 inclusive */
        if (amdgpu_vm_fragment_size != -1 &&
            (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
                dev_warn(adev->dev, "valid range is between 4 and 9\n");
                amdgpu_vm_fragment_size = -1;
        }

        if (amdgpu_sched_hw_submission < 2) {
                dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
                         amdgpu_sched_hw_submission);
                amdgpu_sched_hw_submission = 2;
        } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
                dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
                         amdgpu_sched_hw_submission);
                amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
        }

        if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
                dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
                amdgpu_reset_method = -1;
        }

        amdgpu_device_check_smu_prv_buffer_size(adev);

        amdgpu_device_check_vm_size(adev);

        amdgpu_device_check_block_size(adev);

        adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

        return 0;
}

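/*
 * Illustrative example (not part of the original file, parameter names
 * correspond to the amdgpu_* variables checked above): a hypothetical
 * invocation such as
 *
 *      modprobe amdgpu sched_jobs=32 gart_size=512 vm_fragment_size=9
 *
 * passes validation unchanged, while sched_jobs=6 would be rounded up to the
 * next power of two (8) and gart_size=16 would be reset to the default (-1).
 */
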
/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver.  Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
                                        enum vga_switcheroo_state state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        int r;

        if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
                return;

        if (state == VGA_SWITCHEROO_ON) {
                pr_info("switched on\n");
                /* don't suspend or resume card normally */
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

                pci_set_power_state(pdev, PCI_D0);
                amdgpu_device_load_pci_state(pdev);
                r = pci_enable_device(pdev);
                if (r)
                        DRM_WARN("pci_enable_device failed (%d)\n", r);
                amdgpu_device_resume(dev, true);

                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
                pr_info("switched off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                amdgpu_device_suspend(dev, true);
                amdgpu_device_cache_pci_state(pdev);
                /* Shut down the device */
                pci_disable_device(pdev);
                pci_set_power_state(pdev, PCI_D3cold);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
}

1608 /**
1609  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1610  *
1611  * @pdev: pci dev pointer
1612  *
1613  * Callback for the switcheroo driver.  Checks if the switcheroo
1614  * state can be changed.
1615  * Returns true if the state can be changed, false if not.
1616  */
1617 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1618 {
1619         struct drm_device *dev = pci_get_drvdata(pdev);
1620
1621         /*
1622          * FIXME: open_count is protected by drm_global_mutex but that would lead to
1623          * locking inversion with the driver load path. And the access here is
1624          * completely racy anyway. So don't bother with locking for now.
1625          */
1626         return atomic_read(&dev->open_count) == 0;
1627 }
1628
1629 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1630         .set_gpu_state = amdgpu_switcheroo_set_state,
1631         .reprobe = NULL,
1632         .can_switch = amdgpu_switcheroo_can_switch,
1633 };
1634
1635 /**
1636  * amdgpu_device_ip_set_clockgating_state - set the CG state
1637  *
1638  * @dev: amdgpu_device pointer
1639  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1640  * @state: clockgating state (gate or ungate)
1641  *
1642  * Sets the requested clockgating state for all instances of
1643  * the hardware IP specified.
1644  * Returns the error code from the last instance.
1645  */
1646 int amdgpu_device_ip_set_clockgating_state(void *dev,
1647                                            enum amd_ip_block_type block_type,
1648                                            enum amd_clockgating_state state)
1649 {
1650         struct amdgpu_device *adev = dev;
1651         int i, r = 0;
1652
1653         for (i = 0; i < adev->num_ip_blocks; i++) {
1654                 if (!adev->ip_blocks[i].status.valid)
1655                         continue;
1656                 if (adev->ip_blocks[i].version->type != block_type)
1657                         continue;
1658                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1659                         continue;
1660                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1661                         (void *)adev, state);
1662                 if (r)
1663                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1664                                   adev->ip_blocks[i].version->funcs->name, r);
1665         }
1666         return r;
1667 }
1668
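/*
 * Usage sketch (illustrative only): a caller gating clocks for all GFX
 * instances might do
 *
 *   r = amdgpu_device_ip_set_clockgating_state(adev,
 *                                              AMD_IP_BLOCK_TYPE_GFX,
 *                                              AMD_CG_STATE_GATE);
 *
 * keeping in mind that only the result of the last matching instance is
 * returned, so earlier failures can be masked.
 */
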
1669 /**
1670  * amdgpu_device_ip_set_powergating_state - set the PG state
1671  *
1672  * @dev: amdgpu_device pointer
1673  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1674  * @state: powergating state (gate or ungate)
1675  *
1676  * Sets the requested powergating state for all instances of
1677  * the hardware IP specified.
1678  * Returns the error code from the last instance.
1679  */
1680 int amdgpu_device_ip_set_powergating_state(void *dev,
1681                                            enum amd_ip_block_type block_type,
1682                                            enum amd_powergating_state state)
1683 {
1684         struct amdgpu_device *adev = dev;
1685         int i, r = 0;
1686
1687         for (i = 0; i < adev->num_ip_blocks; i++) {
1688                 if (!adev->ip_blocks[i].status.valid)
1689                         continue;
1690                 if (adev->ip_blocks[i].version->type != block_type)
1691                         continue;
1692                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1693                         continue;
1694                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1695                         (void *)adev, state);
1696                 if (r)
1697                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1698                                   adev->ip_blocks[i].version->funcs->name, r);
1699         }
1700         return r;
1701 }
1702
1703 /**
1704  * amdgpu_device_ip_get_clockgating_state - get the CG state
1705  *
1706  * @adev: amdgpu_device pointer
1707  * @flags: clockgating feature flags
1708  *
1709  * Walks the list of IPs on the device and updates the clockgating
1710  * flags for each IP.
1711  * Updates @flags with the feature flags for each hardware IP where
1712  * clockgating is enabled.
1713  */
1714 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1715                                             u64 *flags)
1716 {
1717         int i;
1718
1719         for (i = 0; i < adev->num_ip_blocks; i++) {
1720                 if (!adev->ip_blocks[i].status.valid)
1721                         continue;
1722                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1723                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1724         }
1725 }
1726
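/*
 * Usage sketch (illustrative): callers typically pass a zeroed mask and
 * then test individual AMD_CG_SUPPORT_* bits:
 *
 *   u64 flags = 0;
 *
 *   amdgpu_device_ip_get_clockgating_state(adev, &flags);
 *   if (flags & AMD_CG_SUPPORT_GFX_MGCG)
 *           ...
 */
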
1727 /**
1728  * amdgpu_device_ip_wait_for_idle - wait for idle
1729  *
1730  * @adev: amdgpu_device pointer
1731  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1732  *
1733  * Waits for the requested hardware IP to be idle.
1734  * Returns 0 for success or a negative error code on failure.
1735  */
1736 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1737                                    enum amd_ip_block_type block_type)
1738 {
1739         int i, r;
1740
1741         for (i = 0; i < adev->num_ip_blocks; i++) {
1742                 if (!adev->ip_blocks[i].status.valid)
1743                         continue;
1744                 if (adev->ip_blocks[i].version->type == block_type) {
1745                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1746                         if (r)
1747                                 return r;
1748                         break;
1749                 }
1750         }
1751         return 0;
1753 }
1754
1755 /**
1756  * amdgpu_device_ip_is_idle - is the hardware IP idle
1757  *
1758  * @adev: amdgpu_device pointer
1759  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1760  *
1761  * Check if the hardware IP is idle or not.
1762  * Returns true if the IP is idle, false if not.
1763  */
1764 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1765                               enum amd_ip_block_type block_type)
1766 {
1767         int i;
1768
1769         for (i = 0; i < adev->num_ip_blocks; i++) {
1770                 if (!adev->ip_blocks[i].status.valid)
1771                         continue;
1772                 if (adev->ip_blocks[i].version->type == block_type)
1773                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1774         }
1775         return true;
1777 }
1778
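/*
 * Illustrative pairing (editorial sketch): a caller can poll
 * amdgpu_device_ip_is_idle() and fall back to the blocking wait:
 *
 *   if (!amdgpu_device_ip_is_idle(adev, AMD_IP_BLOCK_TYPE_GMC))
 *           r = amdgpu_device_ip_wait_for_idle(adev,
 *                                              AMD_IP_BLOCK_TYPE_GMC);
 */
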
1779 /**
1780  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1781  *
1782  * @adev: amdgpu_device pointer
1783  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1784  *
1785  * Returns a pointer to the hardware IP block structure
1786  * if it exists for the asic, otherwise NULL.
1787  */
1788 struct amdgpu_ip_block *
1789 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1790                               enum amd_ip_block_type type)
1791 {
1792         int i;
1793
1794         for (i = 0; i < adev->num_ip_blocks; i++)
1795                 if (adev->ip_blocks[i].version->type == type)
1796                         return &adev->ip_blocks[i];
1797
1798         return NULL;
1799 }
1800
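/*
 * Example (illustrative): checking whether a display IP instance exists
 * before touching display state; a NULL return means the asic has no such
 * block registered:
 *
 *   struct amdgpu_ip_block *ip =
 *           amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE);
 *
 *   if (!ip)
 *           return;   (headless asic, nothing to do)
 */
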
1801 /**
1802  * amdgpu_device_ip_block_version_cmp
1803  *
1804  * @adev: amdgpu_device pointer
1805  * @type: enum amd_ip_block_type
1806  * @major: major version
1807  * @minor: minor version
1808  *
1809  * Returns 0 if the IP block version is equal to or greater than the
1810  * requested version, 1 if it is smaller or the IP block doesn't exist.
1811  */
1812 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1813                                        enum amd_ip_block_type type,
1814                                        u32 major, u32 minor)
1815 {
1816         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1817
1818         if (ip_block && ((ip_block->version->major > major) ||
1819                         ((ip_block->version->major == major) &&
1820                         (ip_block->version->minor >= minor))))
1821                 return 0;
1822
1823         return 1;
1824 }
1825
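/*
 * Semantics sketch (editorial): note the inverted, boolean-like return
 * convention. On an asic whose GFX block is version 8.1,
 *
 *   amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 8, 0)
 *
 * returns 0 ("at least 8.0"), while asking for 9.0 returns 1.
 */
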
1826 /**
1827  * amdgpu_device_ip_block_add
1828  *
1829  * @adev: amdgpu_device pointer
1830  * @ip_block_version: pointer to the IP to add
1831  *
1832  * Adds the IP block driver information to the collection of IPs
1833  * on the asic.
1834  */
1835 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1836                                const struct amdgpu_ip_block_version *ip_block_version)
1837 {
1838         if (!ip_block_version)
1839                 return -EINVAL;
1840
1841         switch (ip_block_version->type) {
1842         case AMD_IP_BLOCK_TYPE_VCN:
1843                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1844                         return 0;
1845                 break;
1846         case AMD_IP_BLOCK_TYPE_JPEG:
1847                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1848                         return 0;
1849                 break;
1850         default:
1851                 break;
1852         }
1853
1854         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1855                   ip_block_version->funcs->name);
1856
1857         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1858
1859         return 0;
1860 }
1861
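/*
 * Typical use (sketch; the block names vary per asic): the per-family soc
 * code registers its IPs in init order, e.g. something like
 *
 *   amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 *   amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
 *   ...
 *
 * Harvested VCN/JPEG instances are silently skipped by the checks above.
 */
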
1862 /**
1863  * amdgpu_device_enable_virtual_display - enable virtual display feature
1864  *
1865  * @adev: amdgpu_device pointer
1866  *
1867  * Enables the virtual display feature if the user has enabled it via
1868  * the module parameter virtual_display.  This feature provides a virtual
1869  * display hardware on headless boards or in virtualized environments.
1870  * This function parses and validates the configuration string specified by
1871  * the user and configures the virtual display configuration (number of
1872  * virtual connectors, crtcs, etc.) specified.
1873  */
1874 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1875 {
1876         adev->enable_virtual_display = false;
1877
1878         if (amdgpu_virtual_display) {
1879                 const char *pci_address_name = pci_name(adev->pdev);
1880                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1881
1882                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1883                 pciaddstr_tmp = pciaddstr;
1884                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1885                         pciaddname = strsep(&pciaddname_tmp, ",");
1886                         if (!strcmp("all", pciaddname)
1887                             || !strcmp(pci_address_name, pciaddname)) {
1888                                 long num_crtc;
1889                                 int res = -1;
1890
1891                                 adev->enable_virtual_display = true;
1892
1893                                 if (pciaddname_tmp)
1894                                         res = kstrtol(pciaddname_tmp, 10,
1895                                                       &num_crtc);
1896
1897                                 if (!res) {
1898                                         if (num_crtc < 1)
1899                                                 num_crtc = 1;
1900                                         if (num_crtc > 6)
1901                                                 num_crtc = 6;
1902                                         adev->mode_info.num_crtc = num_crtc;
1903                                 } else {
1904                                         adev->mode_info.num_crtc = 1;
1905                                 }
1906                                 break;
1907                         }
1908                 }
1909
1910                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1911                          amdgpu_virtual_display, pci_address_name,
1912                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1913
1914                 kfree(pciaddstr);
1915         }
1916 }
1917
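/*
 * Parameter format (illustrative): virtual_display takes a semicolon
 * separated list of "pci-address,crtc-count" entries, e.g.
 *
 *   amdgpu.virtual_display=0000:04:00.0,2
 *
 * or "all,1" to enable one virtual crtc on every device; the crtc count
 * is clamped to the 1..6 range above.
 */
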
1918 /**
1919  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1920  *
1921  * @adev: amdgpu_device pointer
1922  *
1923  * Parses the asic configuration parameters specified in the gpu info
1924  * firmware and makes them available to the driver for use in configuring
1925  * the asic.
1926  * Returns 0 on success, -EINVAL on failure.
1927  */
1928 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1929 {
1930         const char *chip_name;
1931         char fw_name[40];
1932         int err;
1933         const struct gpu_info_firmware_header_v1_0 *hdr;
1934
1935         adev->firmware.gpu_info_fw = NULL;
1936
1937         if (adev->mman.discovery_bin) {
1938                 /*
1939                  * FIXME: The bounding box is still needed by Navi12, so
1940                  * temporarily read it from gpu_info firmware. Should be dropped
1941                  * when DAL no longer needs it.
1942                  */
1943                 if (adev->asic_type != CHIP_NAVI12)
1944                         return 0;
1945         }
1946
1947         switch (adev->asic_type) {
1948         default:
1949                 return 0;
1950         case CHIP_VEGA10:
1951                 chip_name = "vega10";
1952                 break;
1953         case CHIP_VEGA12:
1954                 chip_name = "vega12";
1955                 break;
1956         case CHIP_RAVEN:
1957                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1958                         chip_name = "raven2";
1959                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1960                         chip_name = "picasso";
1961                 else
1962                         chip_name = "raven";
1963                 break;
1964         case CHIP_ARCTURUS:
1965                 chip_name = "arcturus";
1966                 break;
1967         case CHIP_NAVI12:
1968                 chip_name = "navi12";
1969                 break;
1970         }
1971
1972         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1973         err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1974         if (err) {
1975                 dev_err(adev->dev,
1976                         "Failed to load gpu_info firmware \"%s\"\n",
1977                         fw_name);
1978                 goto out;
1979         }
1980         err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1981         if (err) {
1982                 dev_err(adev->dev,
1983                         "Failed to validate gpu_info firmware \"%s\"\n",
1984                         fw_name);
1985                 goto out;
1986         }
1987
1988         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1989         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1990
1991         switch (hdr->version_major) {
1992         case 1:
1993         {
1994                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1995                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1996                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1997
1998                 /*
1999                  * Should be dropped when DAL no longer needs it.
2000                  */
2001                 if (adev->asic_type == CHIP_NAVI12)
2002                         goto parse_soc_bounding_box;
2003
2004                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2005                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2006                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2007                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2008                 adev->gfx.config.max_texture_channel_caches =
2009                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
2010                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2011                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2012                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2013                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2014                 adev->gfx.config.double_offchip_lds_buf =
2015                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2016                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2017                 adev->gfx.cu_info.max_waves_per_simd =
2018                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2019                 adev->gfx.cu_info.max_scratch_slots_per_cu =
2020                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2021                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2022                 if (hdr->version_minor >= 1) {
2023                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2024                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2025                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2026                         adev->gfx.config.num_sc_per_sh =
2027                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2028                         adev->gfx.config.num_packer_per_sc =
2029                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2030                 }
2031
2032 parse_soc_bounding_box:
2033                 /*
2034                  * soc bounding box info is not integrated in the discovery table,
2035                  * so we still need to parse it from the gpu info firmware.
2036                  */
2037                 if (hdr->version_minor == 2) {
2038                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2039                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2040                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2041                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2042                 }
2043                 break;
2044         }
2045         default:
2046                 dev_err(adev->dev,
2047                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2048                 err = -EINVAL;
2049                 goto out;
2050         }
2051 out:
2052         return err;
2053 }
2054
2055 /**
2056  * amdgpu_device_ip_early_init - run early init for hardware IPs
2057  *
2058  * @adev: amdgpu_device pointer
2059  *
2060  * Early initialization pass for hardware IPs.  The hardware IPs that make
2061  * up each asic are discovered and each IP's early_init callback is run.  This
2062  * is the first stage in initializing the asic.
2063  * Returns 0 on success, negative error code on failure.
2064  */
2065 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2066 {
2067         struct drm_device *dev = adev_to_drm(adev);
2068         struct pci_dev *parent;
2069         int i, r;
2070
2071         amdgpu_device_enable_virtual_display(adev);
2072
2073         if (amdgpu_sriov_vf(adev)) {
2074                 r = amdgpu_virt_request_full_gpu(adev, true);
2075                 if (r)
2076                         return r;
2077         }
2078
2079         switch (adev->asic_type) {
2080 #ifdef CONFIG_DRM_AMDGPU_SI
2081         case CHIP_VERDE:
2082         case CHIP_TAHITI:
2083         case CHIP_PITCAIRN:
2084         case CHIP_OLAND:
2085         case CHIP_HAINAN:
2086                 adev->family = AMDGPU_FAMILY_SI;
2087                 r = si_set_ip_blocks(adev);
2088                 if (r)
2089                         return r;
2090                 break;
2091 #endif
2092 #ifdef CONFIG_DRM_AMDGPU_CIK
2093         case CHIP_BONAIRE:
2094         case CHIP_HAWAII:
2095         case CHIP_KAVERI:
2096         case CHIP_KABINI:
2097         case CHIP_MULLINS:
2098                 if (adev->flags & AMD_IS_APU)
2099                         adev->family = AMDGPU_FAMILY_KV;
2100                 else
2101                         adev->family = AMDGPU_FAMILY_CI;
2102
2103                 r = cik_set_ip_blocks(adev);
2104                 if (r)
2105                         return r;
2106                 break;
2107 #endif
2108         case CHIP_TOPAZ:
2109         case CHIP_TONGA:
2110         case CHIP_FIJI:
2111         case CHIP_POLARIS10:
2112         case CHIP_POLARIS11:
2113         case CHIP_POLARIS12:
2114         case CHIP_VEGAM:
2115         case CHIP_CARRIZO:
2116         case CHIP_STONEY:
2117                 if (adev->flags & AMD_IS_APU)
2118                         adev->family = AMDGPU_FAMILY_CZ;
2119                 else
2120                         adev->family = AMDGPU_FAMILY_VI;
2121
2122                 r = vi_set_ip_blocks(adev);
2123                 if (r)
2124                         return r;
2125                 break;
2126         default:
2127                 r = amdgpu_discovery_set_ip_blocks(adev);
2128                 if (r)
2129                         return r;
2130                 break;
2131         }
2132
2133         if (amdgpu_has_atpx() &&
2134             (amdgpu_is_atpx_hybrid() ||
2135              amdgpu_has_atpx_dgpu_power_cntl()) &&
2136             ((adev->flags & AMD_IS_APU) == 0) &&
2137             !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2138                 adev->flags |= AMD_IS_PX;
2139
2140         if (!(adev->flags & AMD_IS_APU)) {
2141                 parent = pci_upstream_bridge(adev->pdev);
2142                 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2143         }
2144
2145         amdgpu_amdkfd_device_probe(adev);
2146
2147         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2148         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2149                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2150         if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2151                 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2152
2153         for (i = 0; i < adev->num_ip_blocks; i++) {
2154                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2155                         DRM_ERROR("disabled ip block: %d <%s>\n",
2156                                   i, adev->ip_blocks[i].version->funcs->name);
2157                         adev->ip_blocks[i].status.valid = false;
2158                 } else {
2159                         if (adev->ip_blocks[i].version->funcs->early_init) {
2160                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2161                                 if (r == -ENOENT) {
2162                                         adev->ip_blocks[i].status.valid = false;
2163                                 } else if (r) {
2164                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2165                                                   adev->ip_blocks[i].version->funcs->name, r);
2166                                         return r;
2167                                 } else {
2168                                         adev->ip_blocks[i].status.valid = true;
2169                                 }
2170                         } else {
2171                                 adev->ip_blocks[i].status.valid = true;
2172                         }
2173                 }
2174                 /* get the vbios after the asic_funcs are set up */
2175                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2176                         r = amdgpu_device_parse_gpu_info_fw(adev);
2177                         if (r)
2178                                 return r;
2179
2180                         /* Read BIOS */
2181                         if (!amdgpu_get_bios(adev))
2182                                 return -EINVAL;
2183
2184                         r = amdgpu_atombios_init(adev);
2185                         if (r) {
2186                                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2187                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2188                                 return r;
2189                         }
2190
2191                         /* get pf2vf msg info at the earliest possible time */
2192                         if (amdgpu_sriov_vf(adev))
2193                                 amdgpu_virt_init_data_exchange(adev);
2194
2195                 }
2196         }
2197
2198         adev->cg_flags &= amdgpu_cg_mask;
2199         adev->pg_flags &= amdgpu_pg_mask;
2200
2201         return 0;
2202 }
2203
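/*
 * Note (illustrative): the ip_block_mask handling above means that
 * booting with e.g. amdgpu.ip_block_mask=0xfffffffd marks IP block 1
 * invalid so that none of its init callbacks run; which index maps to
 * which IP depends on the registration order for the asic.
 */
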
2204 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2205 {
2206         int i, r;
2207
2208         for (i = 0; i < adev->num_ip_blocks; i++) {
2209                 if (!adev->ip_blocks[i].status.sw)
2210                         continue;
2211                 if (adev->ip_blocks[i].status.hw)
2212                         continue;
2213                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2214                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2215                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2216                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2217                         if (r) {
2218                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2219                                           adev->ip_blocks[i].version->funcs->name, r);
2220                                 return r;
2221                         }
2222                         adev->ip_blocks[i].status.hw = true;
2223                 }
2224         }
2225
2226         return 0;
2227 }
2228
2229 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2230 {
2231         int i, r;
2232
2233         for (i = 0; i < adev->num_ip_blocks; i++) {
2234                 if (!adev->ip_blocks[i].status.sw)
2235                         continue;
2236                 if (adev->ip_blocks[i].status.hw)
2237                         continue;
2238                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2239                 if (r) {
2240                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2241                                   adev->ip_blocks[i].version->funcs->name, r);
2242                         return r;
2243                 }
2244                 adev->ip_blocks[i].status.hw = true;
2245         }
2246
2247         return 0;
2248 }
2249
2250 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2251 {
2252         int r = 0;
2253         int i;
2254         uint32_t smu_version;
2255
2256         if (adev->asic_type >= CHIP_VEGA10) {
2257                 for (i = 0; i < adev->num_ip_blocks; i++) {
2258                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2259                                 continue;
2260
2261                         if (!adev->ip_blocks[i].status.sw)
2262                                 continue;
2263
2264                         /* no need to do the fw loading again if already done */
2265                         if (adev->ip_blocks[i].status.hw)
2266                                 break;
2267
2268                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2269                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2270                                 if (r) {
2271                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2272                                                           adev->ip_blocks[i].version->funcs->name, r);
2273                                         return r;
2274                                 }
2275                         } else {
2276                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2277                                 if (r) {
2278                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2279                                                           adev->ip_blocks[i].version->funcs->name, r);
2280                                         return r;
2281                                 }
2282                         }
2283
2284                         adev->ip_blocks[i].status.hw = true;
2285                         break;
2286                 }
2287         }
2288
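        /* under SR-IOV the host is expected to handle the SMU firmware,
         * hence the load below is skipped, except on Tonga */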
2289         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2290                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2291
2292         return r;
2293 }
2294
2295 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2296 {
2297         long timeout;
2298         int r, i;
2299
2300         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2301                 struct amdgpu_ring *ring = adev->rings[i];
2302
2303                 /* No need to setup the GPU scheduler for rings that don't need it */
2304                 if (!ring || ring->no_scheduler)
2305                         continue;
2306
2307                 switch (ring->funcs->type) {
2308                 case AMDGPU_RING_TYPE_GFX:
2309                         timeout = adev->gfx_timeout;
2310                         break;
2311                 case AMDGPU_RING_TYPE_COMPUTE:
2312                         timeout = adev->compute_timeout;
2313                         break;
2314                 case AMDGPU_RING_TYPE_SDMA:
2315                         timeout = adev->sdma_timeout;
2316                         break;
2317                 default:
2318                         timeout = adev->video_timeout;
2319                         break;
2320                 }
2321
2322                 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2323                                    ring->num_hw_submission, amdgpu_job_hang_limit,
2324                                    timeout, adev->reset_domain->wq,
2325                                    ring->sched_score, ring->name,
2326                                    adev->dev);
2327                 if (r) {
2328                         DRM_ERROR("Failed to create scheduler on ring %s.\n",
2329                                   ring->name);
2330                         return r;
2331                 }
2332         }
2333
2334         return 0;
2335 }
2336
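/*
 * Note (illustrative): the per-ring timeouts chosen above come from the
 * amdgpu.lockup_timeout module parameter, which accepts up to four
 * comma-separated values, e.g.
 *
 *   amdgpu.lockup_timeout=10000,60000,10000,10000
 *
 * mapping to gfx, compute, sdma and video rings respectively.
 */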
2337
2338 /**
2339  * amdgpu_device_ip_init - run init for hardware IPs
2340  *
2341  * @adev: amdgpu_device pointer
2342  *
2343  * Main initialization pass for hardware IPs.  The list of all the hardware
2344  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2345  * are run.  sw_init initializes the software state associated with each IP
2346  * and hw_init initializes the hardware associated with each IP.
2347  * Returns 0 on success, negative error code on failure.
2348  */
2349 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2350 {
2351         int i, r;
2352
2353         r = amdgpu_ras_init(adev);
2354         if (r)
2355                 return r;
2356
2357         for (i = 0; i < adev->num_ip_blocks; i++) {
2358                 if (!adev->ip_blocks[i].status.valid)
2359                         continue;
2360                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2361                 if (r) {
2362                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2363                                   adev->ip_blocks[i].version->funcs->name, r);
2364                         goto init_failed;
2365                 }
2366                 adev->ip_blocks[i].status.sw = true;
2367
2368                 /* need to do gmc hw init early so we can allocate gpu mem */
2369                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2370                         /* Try to reserve bad pages early */
2371                         if (amdgpu_sriov_vf(adev))
2372                                 amdgpu_virt_exchange_data(adev);
2373
2374                         r = amdgpu_device_vram_scratch_init(adev);
2375                         if (r) {
2376                                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2377                                 goto init_failed;
2378                         }
2379                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2380                         if (r) {
2381                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2382                                 goto init_failed;
2383                         }
2384                         r = amdgpu_device_wb_init(adev);
2385                         if (r) {
2386                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2387                                 goto init_failed;
2388                         }
2389                         adev->ip_blocks[i].status.hw = true;
2390
2391                         /* right after GMC hw init, we create CSA */
2392                         if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2393                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2394                                                                 AMDGPU_GEM_DOMAIN_VRAM,
2395                                                                 AMDGPU_CSA_SIZE);
2396                                 if (r) {
2397                                         DRM_ERROR("allocate CSA failed %d\n", r);
2398                                         goto init_failed;
2399                                 }
2400                         }
2401                 }
2402         }
2403
2404         if (amdgpu_sriov_vf(adev))
2405                 amdgpu_virt_init_data_exchange(adev);
2406
2407         r = amdgpu_ib_pool_init(adev);
2408         if (r) {
2409                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2410                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2411                 goto init_failed;
2412         }
2413
2414         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init completes */
2415         if (r)
2416                 goto init_failed;
2417
2418         r = amdgpu_device_ip_hw_init_phase1(adev);
2419         if (r)
2420                 goto init_failed;
2421
2422         r = amdgpu_device_fw_loading(adev);
2423         if (r)
2424                 goto init_failed;
2425
2426         r = amdgpu_device_ip_hw_init_phase2(adev);
2427         if (r)
2428                 goto init_failed;
2429
2430         /*
2431          * Retired pages will be loaded from eeprom and reserved here.
2432          * This must be called after amdgpu_device_ip_hw_init_phase2 since,
2433          * for some ASICs, the RAS EEPROM code relies on the SMU being
2434          * fully functional for I2C communication, which is only true at
2435          * this point.
2436          *
2437          * amdgpu_ras_recovery_init may fail, but the caller only cares
2438          * about failures caused by a bad gpu state and stops the amdgpu
2439          * init process accordingly. For other failures it still releases
2440          * all the resources and prints an error message, rather than
2441          * returning a negative value to the upper level.
2442          *
2443          * Note: theoretically, this should be called before all vram
2444          * allocations to protect retired pages from being abused.
2444          */
2445         r = amdgpu_ras_recovery_init(adev);
2446         if (r)
2447                 goto init_failed;
2448
2449         /*
2450          * In case of XGMI, grab an extra reference on the reset domain for this device.
2451          */
2452         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2453                 if (amdgpu_xgmi_add_device(adev) == 0) {
2454                         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2455
2456                         if (!hive->reset_domain ||
2457                             !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2458                                 r = -ENOENT;
2459                                 amdgpu_put_xgmi_hive(hive);
2460                                 goto init_failed;
2461                         }
2462
2463                         /* Drop the early temporary reset domain we created for device */
2464                         amdgpu_reset_put_reset_domain(adev->reset_domain);
2465                         adev->reset_domain = hive->reset_domain;
2466                         amdgpu_put_xgmi_hive(hive);
2467                 }
2468         }
2469
2470         r = amdgpu_device_init_schedulers(adev);
2471         if (r)
2472                 goto init_failed;
2473
2474         /* Don't init kfd if the whole hive needs to be reset during init */
2475         if (!adev->gmc.xgmi.pending_reset)
2476                 amdgpu_amdkfd_device_init(adev);
2477
2478         amdgpu_fru_get_product_info(adev);
2479
2480 init_failed:
2481         if (amdgpu_sriov_vf(adev))
2482                 amdgpu_virt_release_full_gpu(adev, true);
2483
2484         return r;
2485 }
2486
2487 /**
2488  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2489  *
2490  * @adev: amdgpu_device pointer
2491  *
2492  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2493  * this function before a GPU reset.  If the value is retained after a
2494  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2495  */
2496 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2497 {
2498         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2499 }
2500
2501 /**
2502  * amdgpu_device_check_vram_lost - check if vram is valid
2503  *
2504  * @adev: amdgpu_device pointer
2505  *
2506  * Checks the reset magic value written to the gart pointer in VRAM.
2507  * The driver calls this after a GPU reset to see if the contents of
2508  * VRAM were lost or not.
2509  * returns true if vram is lost, false if not.
2510  */
2511 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2512 {
2513         if (memcmp(adev->gart.ptr, adev->reset_magic,
2514                         AMDGPU_RESET_MAGIC_NUM))
2515                 return true;
2516
2517         if (!amdgpu_in_reset(adev))
2518                 return false;
2519
2520         /*
2521          * For all ASICs with baco/mode1 reset, the VRAM is
2522          * always assumed to be lost.
2523          */
2524         switch (amdgpu_asic_reset_method(adev)) {
2525         case AMD_RESET_METHOD_BACO:
2526         case AMD_RESET_METHOD_MODE1:
2527                 return true;
2528         default:
2529                 return false;
2530         }
2531 }
2532
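/*
 * Round-trip sketch (editorial): the intended usage pairs the two helpers
 * around a reset:
 *
 *   amdgpu_device_fill_reset_magic(adev);
 *   ... perform GPU reset ...
 *   if (amdgpu_device_check_vram_lost(adev))
 *           ... re-upload VRAM contents (ucode bo, page tables, etc.) ...
 *
 * BACO and mode1 resets are pessimistically treated as always losing VRAM.
 */
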
2533 /**
2534  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2535  *
2536  * @adev: amdgpu_device pointer
2537  * @state: clockgating state (gate or ungate)
2538  *
2539  * The list of all the hardware IPs that make up the asic is walked and the
2540  * set_clockgating_state callbacks are run.
2541  * The late init pass enables clockgating for the hardware IPs, while the
2542  * fini/suspend pass disables it.
2543  * Returns 0 on success, negative error code on failure.
2544  */
2546 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2547                                enum amd_clockgating_state state)
2548 {
2549         int i, j, r;
2550
2551         if (amdgpu_emu_mode == 1)
2552                 return 0;
2553
2554         for (j = 0; j < adev->num_ip_blocks; j++) {
2555                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2556                 if (!adev->ip_blocks[i].status.late_initialized)
2557                         continue;
2558                 /* skip CG for GFX on S0ix */
2559                 if (adev->in_s0ix &&
2560                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2561                         continue;
2562                 /* skip CG for VCE/UVD, it's handled specially */
2563                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2564                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2565                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2566                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2567                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2568                         /* enable clockgating to save power */
2569                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2570                                                                                      state);
2571                         if (r) {
2572                                 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2573                                           adev->ip_blocks[i].version->funcs->name, r);
2574                                 return r;
2575                         }
2576                 }
2577         }
2578
2579         return 0;
2580 }
2581
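/*
 * Ordering note (editorial): the index arithmetic above walks the IP list
 * front-to-back when gating and back-to-front when ungating, e.g. with
 * blocks [COMMON, GMC, GFX] gating visits COMMON first while ungating
 * visits GFX first, mirroring the init/fini ordering.
 */
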
2582 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2583                                enum amd_powergating_state state)
2584 {
2585         int i, j, r;
2586
2587         if (amdgpu_emu_mode == 1)
2588                 return 0;
2589
2590         for (j = 0; j < adev->num_ip_blocks; j++) {
2591                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2592                 if (!adev->ip_blocks[i].status.late_initialized)
2593                         continue;
2594                 /* skip PG for GFX on S0ix */
2595                 if (adev->in_s0ix &&
2596                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2597                         continue;
2598                 /* skip PG for VCE/UVD, it's handled specially */
2599                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2600                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2601                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2602                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2603                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2604                         /* enable powergating to save power */
2605                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2606                                                                                         state);
2607                         if (r) {
2608                                 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2609                                           adev->ip_blocks[i].version->funcs->name, r);
2610                                 return r;
2611                         }
2612                 }
2613         }
2614         return 0;
2615 }
2616
2617 static int amdgpu_device_enable_mgpu_fan_boost(void)
2618 {
2619         struct amdgpu_gpu_instance *gpu_ins;
2620         struct amdgpu_device *adev;
2621         int i, ret = 0;
2622
2623         mutex_lock(&mgpu_info.mutex);
2624
2625         /*
2626          * MGPU fan boost feature should be enabled
2627          * only when there are two or more dGPUs in
2628          * the system
2629          */
2630         if (mgpu_info.num_dgpu < 2)
2631                 goto out;
2632
2633         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2634                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2635                 adev = gpu_ins->adev;
2636                 if (!(adev->flags & AMD_IS_APU) &&
2637                     !gpu_ins->mgpu_fan_enabled) {
2638                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2639                         if (ret)
2640                                 break;
2641
2642                         gpu_ins->mgpu_fan_enabled = 1;
2643                 }
2644         }
2645
2646 out:
2647         mutex_unlock(&mgpu_info.mutex);
2648
2649         return ret;
2650 }
2651
2652 /**
2653  * amdgpu_device_ip_late_init - run late init for hardware IPs
2654  *
2655  * @adev: amdgpu_device pointer
2656  *
2657  * Late initialization pass for hardware IPs.  The list of all the hardware
2658  * IPs that make up the asic is walked and the late_init callbacks are run.
2659  * late_init covers any special initialization that an IP requires
2660  * after all of the IPs have been initialized or something that needs to happen
2661  * late in the init process.
2662  * Returns 0 on success, negative error code on failure.
2663  */
2664 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2665 {
2666         struct amdgpu_gpu_instance *gpu_instance;
2667         int i = 0, r;
2668
2669         for (i = 0; i < adev->num_ip_blocks; i++) {
2670                 if (!adev->ip_blocks[i].status.hw)
2671                         continue;
2672                 if (adev->ip_blocks[i].version->funcs->late_init) {
2673                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2674                         if (r) {
2675                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2676                                           adev->ip_blocks[i].version->funcs->name, r);
2677                                 return r;
2678                         }
2679                 }
2680                 adev->ip_blocks[i].status.late_initialized = true;
2681         }
2682
2683         r = amdgpu_ras_late_init(adev);
2684         if (r) {
2685                 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2686                 return r;
2687         }
2688
2689         amdgpu_ras_set_error_query_ready(adev, true);
2690
2691         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2692         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2693
2694         amdgpu_device_fill_reset_magic(adev);
2695
2696         r = amdgpu_device_enable_mgpu_fan_boost();
2697         if (r)
2698                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2699
2700         /* For passthrough configurations on arcturus and aldebaran, enable special SBR handling */
2701         if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2702                                adev->asic_type == CHIP_ALDEBARAN))
2703                 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2704
2705         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2706                 mutex_lock(&mgpu_info.mutex);
2707
2708                 /*
2709                  * Reset device p-state to low, as the devices boot in the high p-state.
2710                  *
2711                  * This should be performed only after all devices from the same
2712                  * hive get initialized.
2713                  *
2714          * However, the number of devices in a hive is not known in
2715          * advance; it is counted one by one as the devices initialize.
2716                  *
2717                  * So, we wait for all XGMI interlinked devices initialized.
2718                  * This may bring some delays as those devices may come from
2719                  * different hives. But that should be OK.
2720                  */
2721                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2722                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2723                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2724                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2725                                         continue;
2726
2727                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2728                                                 AMDGPU_XGMI_PSTATE_MIN);
2729                                 if (r) {
2730                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2731                                         break;
2732                                 }
2733                         }
2734                 }
2735
2736                 mutex_unlock(&mgpu_info.mutex);
2737         }
2738
2739         return 0;
2740 }
2741
2742 /**
2743  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2744  *
2745  * @adev: amdgpu_device pointer
2746  *
2747  * For ASICs that need to disable the SMC first
2748  */
2749 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2750 {
2751         int i, r;
2752
2753         if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2754                 return;
2755
2756         for (i = 0; i < adev->num_ip_blocks; i++) {
2757                 if (!adev->ip_blocks[i].status.hw)
2758                         continue;
2759                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2760                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2761                         /* XXX handle errors */
2762                         if (r) {
2763                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2764                                           adev->ip_blocks[i].version->funcs->name, r);
2765                         }
2766                         adev->ip_blocks[i].status.hw = false;
2767                         break;
2768                 }
2769         }
2770 }
2771
2772 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2773 {
2774         int i, r;
2775
2776         for (i = 0; i < adev->num_ip_blocks; i++) {
2777                 if (!adev->ip_blocks[i].version->funcs->early_fini)
2778                         continue;
2779
2780                 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2781                 if (r) {
2782                         DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2783                                   adev->ip_blocks[i].version->funcs->name, r);
2784                 }
2785         }
2786
2787         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2788         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2789
2790         amdgpu_amdkfd_suspend(adev, false);
2791
2792         /* Workaround for ASICs that need to disable the SMC first */
2793         amdgpu_device_smu_fini_early(adev);
2794
2795         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2796                 if (!adev->ip_blocks[i].status.hw)
2797                         continue;
2798
2799                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2800                 /* XXX handle errors */
2801                 if (r) {
2802                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2803                                   adev->ip_blocks[i].version->funcs->name, r);
2804                 }
2805
2806                 adev->ip_blocks[i].status.hw = false;
2807         }
2808
2809         if (amdgpu_sriov_vf(adev)) {
2810                 if (amdgpu_virt_release_full_gpu(adev, false))
2811                         DRM_ERROR("failed to release exclusive mode on fini\n");
2812         }
2813
2814         return 0;
2815 }
2816
2817 /**
2818  * amdgpu_device_ip_fini - run fini for hardware IPs
2819  *
2820  * @adev: amdgpu_device pointer
2821  *
2822  * Main teardown pass for hardware IPs.  The list of all the hardware
2823  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2824  * are run.  hw_fini tears down the hardware associated with each IP
2825  * and sw_fini tears down any software state associated with each IP.
2826  * Returns 0 on success, negative error code on failure.
2827  */
2828 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2829 {
2830         int i, r;
2831
2832         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2833                 amdgpu_virt_release_ras_err_handler_data(adev);
2834
2835         if (adev->gmc.xgmi.num_physical_nodes > 1)
2836                 amdgpu_xgmi_remove_device(adev);
2837
2838         amdgpu_amdkfd_device_fini_sw(adev);
2839
2840         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2841                 if (!adev->ip_blocks[i].status.sw)
2842                         continue;
2843
2844                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2845                         amdgpu_ucode_free_bo(adev);
2846                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2847                         amdgpu_device_wb_fini(adev);
2848                         amdgpu_device_vram_scratch_fini(adev);
2849                         amdgpu_ib_pool_fini(adev);
2850                 }
2851
2852                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2853                 /* XXX handle errors */
2854                 if (r) {
2855                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2856                                   adev->ip_blocks[i].version->funcs->name, r);
2857                 }
2858                 adev->ip_blocks[i].status.sw = false;
2859                 adev->ip_blocks[i].status.valid = false;
2860         }
2861
2862         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2863                 if (!adev->ip_blocks[i].status.late_initialized)
2864                         continue;
2865                 if (adev->ip_blocks[i].version->funcs->late_fini)
2866                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2867                 adev->ip_blocks[i].status.late_initialized = false;
2868         }
2869
2870         amdgpu_ras_fini(adev);
2871
2872         return 0;
2873 }
2874
2875 /**
2876  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2877  *
2878  * @work: work_struct.
2879  */
2880 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2881 {
2882         struct amdgpu_device *adev =
2883                 container_of(work, struct amdgpu_device, delayed_init_work.work);
2884         int r;
2885
2886         r = amdgpu_ib_ring_tests(adev);
2887         if (r)
2888                 DRM_ERROR("ib ring test failed (%d).\n", r);
2889 }
2890
2891 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2892 {
2893         struct amdgpu_device *adev =
2894                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2895
2896         WARN_ON_ONCE(adev->gfx.gfx_off_state);
2897         WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2898
2899         if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2900                 adev->gfx.gfx_off_state = true;
2901 }
2902
2903 /**
2904  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2905  *
2906  * @adev: amdgpu_device pointer
2907  *
2908  * Main suspend function for hardware IPs.  The list of all the hardware
2909  * IPs that make up the asic is walked, clockgating is disabled and the
2910  * suspend callbacks are run.  suspend puts the hardware and software state
2911  * in each IP into a state suitable for suspend.
2912  * Returns 0 on success, negative error code on failure.
2913  */
2914 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2915 {
2916         int i, r;
2917
2918         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2919         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2920
2921         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2922                 if (!adev->ip_blocks[i].status.valid)
2923                         continue;
2924
2925                 /* displays are handled separately */
2926                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2927                         continue;
2928
2930                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2931                 /* XXX handle errors */
2932                 if (r) {
2933                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2934                                   adev->ip_blocks[i].version->funcs->name, r);
2935                         return r;
2936                 }
2937
2938                 adev->ip_blocks[i].status.hw = false;
2939         }
2940
2941         return 0;
2942 }
2943
2944 /**
2945  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2946  *
2947  * @adev: amdgpu_device pointer
2948  *
2949  * Second phase of the main suspend sequence for hardware IPs.  The list
2950  * of all the hardware IPs that make up the asic is walked and the suspend
2951  * callbacks are run for every block except the displays, which were already
2952  * handled in phase 1.  suspend puts each IP into a state suitable for suspend.
2953  * Returns 0 on success, negative error code on failure.
2954  */
2955 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2956 {
2957         int i, r;
2958
2959         if (adev->in_s0ix)
2960                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2961
2962         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2963                 if (!adev->ip_blocks[i].status.valid)
2964                         continue;
2965                 /* displays are handled in phase1 */
2966                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2967                         continue;
2968                 /* PSP lost connection when err_event_athub occurs */
2969                 if (amdgpu_ras_intr_triggered() &&
2970                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2971                         adev->ip_blocks[i].status.hw = false;
2972                         continue;
2973                 }
2974
2975                 /* skip unnecessary suspend for blocks that are not initialized yet */
2976                 if (adev->gmc.xgmi.pending_reset &&
2977                     !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2978                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2979                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2980                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2981                         adev->ip_blocks[i].status.hw = false;
2982                         continue;
2983                 }
2984
2985                 /* skip suspend of gfx and psp for S0ix:
2986                  * gfx is in the gfxoff state, so on resume it will exit gfxoff
2987                  * just like at runtime. PSP is also part of the always-on
2988                  * hardware, so there is no need to suspend it.
2989                  */
2990                 if (adev->in_s0ix &&
2991                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
2992                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
2993                         continue;
2994
2996                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2997                 /* XXX handle errors */
2998                 if (r) {
2999                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
3000                                   adev->ip_blocks[i].version->funcs->name, r);
3001                 }
3002                 adev->ip_blocks[i].status.hw = false;
3003                 /* handle putting the SMC in the appropriate state */
3004                 if (!amdgpu_sriov_vf(adev)) {
3005                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3006                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3007                                 if (r) {
3008                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3009                                                         adev->mp1_state, r);
3010                                         return r;
3011                                 }
3012                         }
3013                 }
3014         }
3015
3016         return 0;
3017 }
3018
3019 /**
3020  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3021  *
3022  * @adev: amdgpu_device pointer
3023  *
3024  * Main suspend function for hardware IPs.  The list of all the hardware
3025  * IPs that make up the asic is walked, clockgating is disabled and the
3026  * suspend callbacks are run.  suspend puts the hardware and software state
3027  * in each IP into a state suitable for suspend.
3028  * Returns 0 on success, negative error code on failure.
3029  */
3030 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3031 {
3032         int r;
3033
3034         if (amdgpu_sriov_vf(adev)) {
3035                 amdgpu_virt_fini_data_exchange(adev);
3036                 amdgpu_virt_request_full_gpu(adev, false);
3037         }
3038
3039         r = amdgpu_device_ip_suspend_phase1(adev);
3040         if (r)
3041                 return r;
3042         r = amdgpu_device_ip_suspend_phase2(adev);
3043
3044         if (amdgpu_sriov_vf(adev))
3045                 amdgpu_virt_release_full_gpu(adev, false);
3046
3047         return r;
3048 }
3049
3050 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3051 {
3052         int i, r;
3053
3054         static enum amd_ip_block_type ip_order[] = {
3055                 AMD_IP_BLOCK_TYPE_GMC,
3056                 AMD_IP_BLOCK_TYPE_COMMON,
3057                 AMD_IP_BLOCK_TYPE_PSP,
3058                 AMD_IP_BLOCK_TYPE_IH,
3059         };
3060
3061         for (i = 0; i < adev->num_ip_blocks; i++) {
3062                 int j;
3063                 struct amdgpu_ip_block *block;
3064
3065                 block = &adev->ip_blocks[i];
3066                 block->status.hw = false;
3067
3068                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3069
3070                         if (block->version->type != ip_order[j] ||
3071                                 !block->status.valid)
3072                                 continue;
3073
3074                         r = block->version->funcs->hw_init(adev);
3075                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3076                         if (r)
3077                                 return r;
3078                         block->status.hw = true;
3079                 }
3080         }
3081
3082         return 0;
3083 }
3084
3085 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3086 {
3087         int i, r;
3088
3089         static enum amd_ip_block_type ip_order[] = {
3090                 AMD_IP_BLOCK_TYPE_SMC,
3091                 AMD_IP_BLOCK_TYPE_DCE,
3092                 AMD_IP_BLOCK_TYPE_GFX,
3093                 AMD_IP_BLOCK_TYPE_SDMA,
3094                 AMD_IP_BLOCK_TYPE_UVD,
3095                 AMD_IP_BLOCK_TYPE_VCE,
3096                 AMD_IP_BLOCK_TYPE_VCN
3097         };
3098
3099         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3100                 int j;
3101                 struct amdgpu_ip_block *block;
3102
3103                 for (j = 0; j < adev->num_ip_blocks; j++) {
3104                         block = &adev->ip_blocks[j];
3105
3106                         if (block->version->type != ip_order[i] ||
3107                                 !block->status.valid ||
3108                                 block->status.hw)
3109                                 continue;
3110
3111                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3112                                 r = block->version->funcs->resume(adev);
3113                         else
3114                                 r = block->version->funcs->hw_init(adev);
3115
3116                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3117                         if (r)
3118                                 return r;
3119                         block->status.hw = true;
3120                 }
3121         }
3122
3123         return 0;
3124 }
3125
3126 /**
3127  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs (phase 1)
3128  *
3129  * @adev: amdgpu_device pointer
3130  *
3131  * First resume function for hardware IPs.  The list of all the hardware
3132  * IPs that make up the asic is walked and the resume callbacks are run for
3133  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3134  * after a suspend and updates the software state as necessary.  This
3135  * function is also used for restoring the GPU after a GPU reset.
3136  * Returns 0 on success, negative error code on failure.
3137  */
3138 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3139 {
3140         int i, r;
3141
3142         for (i = 0; i < adev->num_ip_blocks; i++) {
3143                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3144                         continue;
3145                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3146                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3147                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
3148
3149                         r = adev->ip_blocks[i].version->funcs->resume(adev);
3150                         if (r) {
3151                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
3152                                           adev->ip_blocks[i].version->funcs->name, r);
3153                                 return r;
3154                         }
3155                         adev->ip_blocks[i].status.hw = true;
3156                 }
3157         }
3158
3159         return 0;
3160 }
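
/*
 * Illustrative note: COMMON, GMC and IH are brought back first because the
 * remaining blocks depend on them; firmware loading and ring setup need a
 * working memory controller and interrupt handling, which is why
 * amdgpu_device_fw_loading() runs between the two resume phases (see
 * amdgpu_device_ip_resume() below).
 */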
3161
3162 /**
3163  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs (phase 2)
3164  *
3165  * @adev: amdgpu_device pointer
3166  *
3167  * Second resume function for hardware IPs.  The list of all the hardware
3168  * IPs that make up the asic is walked and the resume callbacks are run for
3169  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3170  * functional state after a suspend and updates the software state as
3171  * necessary.  This function is also used for restoring the GPU after a GPU
3172  * reset.
3173  * Returns 0 on success, negative error code on failure.
3174  */
3175 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3176 {
3177         int i, r;
3178
3179         for (i = 0; i < adev->num_ip_blocks; i++) {
3180                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3181                         continue;
3182                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3183                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3184                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3185                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3186                         continue;
3187                 r = adev->ip_blocks[i].version->funcs->resume(adev);
3188                 if (r) {
3189                         DRM_ERROR("resume of IP block <%s> failed %d\n",
3190                                   adev->ip_blocks[i].version->funcs->name, r);
3191                         return r;
3192                 }
3193                 adev->ip_blocks[i].status.hw = true;
3194         }
3195
3196         return 0;
3197 }
3198
3199 /**
3200  * amdgpu_device_ip_resume - run resume for hardware IPs
3201  *
3202  * @adev: amdgpu_device pointer
3203  *
3204  * Main resume function for hardware IPs.  The hardware IPs
3205  * are split into two resume functions because they are
3206  * also used in recovering from a GPU reset and some additional
3207  * steps need to be taken between them.  In this case (S3/S4) they are
3208  * run sequentially.
3209  * Returns 0 on success, negative error code on failure.
3210  */
3211 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3212 {
3213         int r;
3214
3215         r = amdgpu_amdkfd_resume_iommu(adev);
3216         if (r)
3217                 return r;
3218
3219         r = amdgpu_device_ip_resume_phase1(adev);
3220         if (r)
3221                 return r;
3222
3223         r = amdgpu_device_fw_loading(adev);
3224         if (r)
3225                 return r;
3226
3227         r = amdgpu_device_ip_resume_phase2(adev);
3228
3229         return r;
3230 }
3231
3232 /**
3233  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3234  *
3235  * @adev: amdgpu_device pointer
3236  *
3237  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3238  */
3239 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3240 {
3241         if (amdgpu_sriov_vf(adev)) {
3242                 if (adev->is_atom_fw) {
3243                         if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3244                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3245                 } else {
3246                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3247                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3248                 }
3249
3250                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3251                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3252         }
3253 }
3254
3255 /**
3256  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3257  *
3258  * @asic_type: AMD asic type
3259  *
3260  * Check if there is DC (new modesetting infrastructure) support for an asic.
3261  * returns true if DC has support, false if not.
3262  */
3263 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3264 {
3265         switch (asic_type) {
3266 #ifdef CONFIG_DRM_AMDGPU_SI
3267         case CHIP_HAINAN:
3268 #endif
3269         case CHIP_TOPAZ:
3270                 /* chips with no display hardware */
3271                 return false;
3272 #if defined(CONFIG_DRM_AMD_DC)
3273         case CHIP_TAHITI:
3274         case CHIP_PITCAIRN:
3275         case CHIP_VERDE:
3276         case CHIP_OLAND:
3277                 /*
3278                  * We have systems in the wild with these ASICs that require
3279                  * LVDS and VGA support which is not supported with DC.
3280                  *
3281                  * Fallback to the non-DC driver here by default so as not to
3282                  * cause regressions.
3283                  */
3284 #if defined(CONFIG_DRM_AMD_DC_SI)
3285                 return amdgpu_dc > 0;
3286 #else
3287                 return false;
3288 #endif
3289         case CHIP_BONAIRE:
3290         case CHIP_KAVERI:
3291         case CHIP_KABINI:
3292         case CHIP_MULLINS:
3293                 /*
3294                  * We have systems in the wild with these ASICs that require
3295                  * VGA support which is not supported with DC.
3296                  *
3297                  * Fallback to the non-DC driver here by default so as not to
3298                  * cause regressions.
3299                  */
3300                 return amdgpu_dc > 0;
3301         default:
3302                 return amdgpu_dc != 0;
3303 #else
3304         default:
3305                 if (amdgpu_dc > 0)
3306                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3307                                          "but isn't supported by ASIC, ignoring\n");
3308                 return false;
3309 #endif
3310         }
3311 }
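
/*
 * Illustrative note: the checks above key off the amdgpu_dc module parameter
 * (amdgpu.dc on the kernel command line, -1 = auto by default), so booting
 * with amdgpu.dc=1 opts the older ASICs listed above into DC, while
 * amdgpu.dc=0 forces the legacy display path instead.
 */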
3312
3313 /**
3314  * amdgpu_device_has_dc_support - check if dc is supported
3315  *
3316  * @adev: amdgpu_device pointer
3317  *
3318  * Returns true for supported, false for not supported
3319  */
3320 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3321 {
3322         if (amdgpu_sriov_vf(adev) ||
3323             adev->enable_virtual_display ||
3324             (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3325                 return false;
3326
3327         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3328 }
3329
3330 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3331 {
3332         struct amdgpu_device *adev =
3333                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3334         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3335
3336         /* It's a bug to not have a hive within this function */
3337         if (WARN_ON(!hive))
3338                 return;
3339
3340         /*
3341          * Use task barrier to synchronize all xgmi reset works across the
3342          * hive. task_barrier_enter and task_barrier_exit will block
3343          * until all the threads running the xgmi reset works reach
3344          * those points. task_barrier_full will do both blocks.
3345          */
3346         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3347
3348                 task_barrier_enter(&hive->tb);
3349                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3350
3351                 if (adev->asic_reset_res)
3352                         goto fail;
3353
3354                 task_barrier_exit(&hive->tb);
3355                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3356
3357                 if (adev->asic_reset_res)
3358                         goto fail;
3359
3360                 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3361                     adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3362                         adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3363         } else {
3364
3365                 task_barrier_full(&hive->tb);
3366                 adev->asic_reset_res = amdgpu_asic_reset(adev);
3367         }
3368
3369 fail:
3370         if (adev->asic_reset_res)
3371                 DRM_WARN("ASIC reset failed with error %d for drm dev %s\n",
3372                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3373         amdgpu_put_xgmi_hive(hive);
3374 }
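
/*
 * Minimal sketch (compiled out, illustrative only) of the drm/task_barrier.h
 * rendezvous pattern used above, assuming one worker per device in the hive
 * has been registered with task_barrier_add_task():
 */
#if 0
static void example_hive_worker(struct task_barrier *tb)
{
	task_barrier_enter(tb);	/* blocks until every worker has arrived */
	/* ... per-device step that must begin in lock step ... */
	task_barrier_exit(tb);	/* rendezvous again before moving on */
}
#endif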
3375
3376 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3377 {
3378         char *input = amdgpu_lockup_timeout;
3379         char *timeout_setting = NULL;
3380         int index = 0;
3381         long timeout;
3382         int ret = 0;
3383
3384         /*
3385          * By default the timeout for non-compute jobs is 10000 ms
3386          * and 60000 ms for compute jobs.
3387          * Under SR-IOV, the compute timeout is 60000 ms in one-VF mode
3388          * and 10000 ms otherwise; bare metal and passthrough use 60000 ms.
3389          */
3390         adev->gfx_timeout = msecs_to_jiffies(10000);
3391         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3392         if (amdgpu_sriov_vf(adev))
3393                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3394                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3395         else
3396                 adev->compute_timeout = msecs_to_jiffies(60000);
3397
3398         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3399                 while ((timeout_setting = strsep(&input, ",")) &&
3400                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3401                         ret = kstrtol(timeout_setting, 0, &timeout);
3402                         if (ret)
3403                                 return ret;
3404
3405                         if (timeout == 0) {
3406                                 index++;
3407                                 continue;
3408                         } else if (timeout < 0) {
3409                                 timeout = MAX_SCHEDULE_TIMEOUT;
3410                                 dev_warn(adev->dev, "lockup timeout disabled");
3411                                 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3412                         } else {
3413                                 timeout = msecs_to_jiffies(timeout);
3414                         }
3415
3416                         switch (index++) {
3417                         case 0:
3418                                 adev->gfx_timeout = timeout;
3419                                 break;
3420                         case 1:
3421                                 adev->compute_timeout = timeout;
3422                                 break;
3423                         case 2:
3424                                 adev->sdma_timeout = timeout;
3425                                 break;
3426                         case 3:
3427                                 adev->video_timeout = timeout;
3428                                 break;
3429                         default:
3430                                 break;
3431                         }
3432                 }
3433                 /*
3434                  * If only one value was specified, it applies to
3435                  * all non-compute jobs.
3436                  */
3437                 if (index == 1) {
3438                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3439                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3440                                 adev->compute_timeout = adev->gfx_timeout;
3441                 }
3442         }
3443
3444         return ret;
3445 }
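
/*
 * Illustrative example: the parser above consumes the amdgpu.lockup_timeout
 * module parameter as up to four comma separated values, in milliseconds, in
 * the order gfx, compute, sdma, video (0 keeps the default, a negative value
 * disables the timeout), e.g.
 *
 *	modprobe amdgpu lockup_timeout=10000,60000,10000,10000
 *
 * A single value such as lockup_timeout=20000 applies to all non-compute
 * queues, and to compute as well under SR-IOV or passthrough.
 */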
3446
3447 /**
3448  * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3449  *
3450  * @adev: amdgpu_device pointer
3451  *
3452  * RAM is direct mapped to the GPU if the IOMMU is disabled or in passthrough mode
3453  */
3454 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3455 {
3456         struct iommu_domain *domain;
3457
3458         domain = iommu_get_domain_for_dev(adev->dev);
3459         if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3460                 adev->ram_is_direct_mapped = true;
3461 }
3462
3463 static const struct attribute *amdgpu_dev_attributes[] = {
3464         &dev_attr_product_name.attr,
3465         &dev_attr_product_number.attr,
3466         &dev_attr_serial_number.attr,
3467         &dev_attr_pcie_replay_count.attr,
3468         NULL
3469 };
3470
3471 /**
3472  * amdgpu_device_init - initialize the driver
3473  *
3474  * @adev: amdgpu_device pointer
3475  * @flags: driver flags
3476  *
3477  * Initializes the driver info and hw (all asics).
3478  * Returns 0 for success or an error on failure.
3479  * Called at driver startup.
3480  */
3481 int amdgpu_device_init(struct amdgpu_device *adev,
3482                        uint32_t flags)
3483 {
3484         struct drm_device *ddev = adev_to_drm(adev);
3485         struct pci_dev *pdev = adev->pdev;
3486         int r, i;
3487         bool px = false;
3488         u32 max_MBps;
3489
3490         adev->shutdown = false;
3491         adev->flags = flags;
3492
3493         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3494                 adev->asic_type = amdgpu_force_asic_type;
3495         else
3496                 adev->asic_type = flags & AMD_ASIC_MASK;
3497
3498         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3499         if (amdgpu_emu_mode == 1)
3500                 adev->usec_timeout *= 10;
3501         adev->gmc.gart_size = 512 * 1024 * 1024;
3502         adev->accel_working = false;
3503         adev->num_rings = 0;
3504         adev->mman.buffer_funcs = NULL;
3505         adev->mman.buffer_funcs_ring = NULL;
3506         adev->vm_manager.vm_pte_funcs = NULL;
3507         adev->vm_manager.vm_pte_num_scheds = 0;
3508         adev->gmc.gmc_funcs = NULL;
3509         adev->harvest_ip_mask = 0x0;
3510         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3511         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3512
3513         adev->smc_rreg = &amdgpu_invalid_rreg;
3514         adev->smc_wreg = &amdgpu_invalid_wreg;
3515         adev->pcie_rreg = &amdgpu_invalid_rreg;
3516         adev->pcie_wreg = &amdgpu_invalid_wreg;
3517         adev->pciep_rreg = &amdgpu_invalid_rreg;
3518         adev->pciep_wreg = &amdgpu_invalid_wreg;
3519         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3520         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3521         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3522         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3523         adev->didt_rreg = &amdgpu_invalid_rreg;
3524         adev->didt_wreg = &amdgpu_invalid_wreg;
3525         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3526         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3527         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3528         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3529
3530         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3531                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3532                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3533
3534         /* mutex initialization is all done here so we
3535          * can call these functions without locking issues */
3536         mutex_init(&adev->firmware.mutex);
3537         mutex_init(&adev->pm.mutex);
3538         mutex_init(&adev->gfx.gpu_clock_mutex);
3539         mutex_init(&adev->srbm_mutex);
3540         mutex_init(&adev->gfx.pipe_reserve_mutex);
3541         mutex_init(&adev->gfx.gfx_off_mutex);
3542         mutex_init(&adev->grbm_idx_mutex);
3543         mutex_init(&adev->mn_lock);
3544         mutex_init(&adev->virt.vf_errors.lock);
3545         hash_init(adev->mn_hash);
3546         mutex_init(&adev->psp.mutex);
3547         mutex_init(&adev->notifier_lock);
3548         mutex_init(&adev->pm.stable_pstate_ctx_lock);
3549         mutex_init(&adev->benchmark_mutex);
3550
3551         amdgpu_device_init_apu_flags(adev);
3552
3553         r = amdgpu_device_check_arguments(adev);
3554         if (r)
3555                 return r;
3556
3557         spin_lock_init(&adev->mmio_idx_lock);
3558         spin_lock_init(&adev->smc_idx_lock);
3559         spin_lock_init(&adev->pcie_idx_lock);
3560         spin_lock_init(&adev->uvd_ctx_idx_lock);
3561         spin_lock_init(&adev->didt_idx_lock);
3562         spin_lock_init(&adev->gc_cac_idx_lock);
3563         spin_lock_init(&adev->se_cac_idx_lock);
3564         spin_lock_init(&adev->audio_endpt_idx_lock);
3565         spin_lock_init(&adev->mm_stats.lock);
3566
3567         INIT_LIST_HEAD(&adev->shadow_list);
3568         mutex_init(&adev->shadow_list_lock);
3569
3570         INIT_LIST_HEAD(&adev->reset_list);
3571
3572         INIT_LIST_HEAD(&adev->ras_list);
3573
3574         INIT_DELAYED_WORK(&adev->delayed_init_work,
3575                           amdgpu_device_delayed_init_work_handler);
3576         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3577                           amdgpu_device_delay_enable_gfx_off);
3578
3579         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3580
3581         adev->gfx.gfx_off_req_count = 1;
3582         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3583
3584         atomic_set(&adev->throttling_logging_enabled, 1);
3585         /*
3586          * If throttling continues, logging will be performed every minute
3587          * to avoid log flooding. "-1" is subtracted since the thermal
3588          * throttling interrupt comes every second. Thus, the total logging
3589          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3590          * for the throttling interrupt) = 60 seconds.
3591          */
3592         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3593         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3594
3595         /* Registers mapping */
3596         /* TODO: block userspace mapping of io register */
3597         if (adev->asic_type >= CHIP_BONAIRE) {
3598                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3599                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3600         } else {
3601                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3602                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3603         }
3604
3605         for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3606                 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3607
3608         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3609         if (!adev->rmmio)
3610                 return -ENOMEM;
3612         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3613         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3614
3615         amdgpu_device_get_pcie_info(adev);
3616
3617         if (amdgpu_mcbp)
3618                 DRM_INFO("MCBP is enabled\n");
3619
3620         /*
3621          * The reset domain needs to be present early, before any XGMI hive is
3622          * discovered and initialized, so the reset semaphore and in_gpu_reset
3623          * flag can be used early during init and before the first RREG32 call.
3624          */
3625         adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3626         if (!adev->reset_domain)
3627                 return -ENOMEM;
3628
3629         /* detect hw virtualization here */
3630         amdgpu_detect_virtualization(adev);
3631
3632         r = amdgpu_device_get_job_timeout_settings(adev);
3633         if (r) {
3634                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3635                 return r;
3636         }
3637
3638         /* early init functions */
3639         r = amdgpu_device_ip_early_init(adev);
3640         if (r)
3641                 return r;
3642
3643         /* Enable TMZ based on IP_VERSION */
3644         amdgpu_gmc_tmz_set(adev);
3645
3646         amdgpu_gmc_noretry_set(adev);
3647         /* Need to get xgmi info early to decide the reset behavior */
3648         if (adev->gmc.xgmi.supported) {
3649                 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3650                 if (r)
3651                         return r;
3652         }
3653
3654         /* enable PCIE atomic ops */
3655         if (amdgpu_sriov_vf(adev))
3656                 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3657                         adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3658                         (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3659         else
3660                 adev->have_atomics_support =
3661                         !pci_enable_atomic_ops_to_root(adev->pdev,
3662                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3663                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3664         if (!adev->have_atomics_support)
3665                 dev_info(adev->dev, "PCIe atomic ops are not supported\n");
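
        /*
         * Illustrative note: 32/64-bit PCIe CAS atomics matter to consumers
         * such as KFD user-mode queues on some dGPUs; when the root port
         * cannot enable them, the dependent features are disabled rather
         * than failing device init here.
         */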
3666
3667         /* doorbell bar mapping and doorbell index init */
3668         amdgpu_device_doorbell_init(adev);
3669
3670         if (amdgpu_emu_mode == 1) {
3671                 /* post the asic on emulation mode */
3672                 emu_soc_asic_init(adev);
3673                 goto fence_driver_init;
3674         }
3675
3676         amdgpu_reset_init(adev);
3677
3678         /* detect if we have an SR-IOV vbios */
3679         amdgpu_device_detect_sriov_bios(adev);
3680
3681         /* check if we need to reset the asic
3682          *  E.g., driver was not cleanly unloaded previously, etc.
3683          */
3684         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3685                 if (adev->gmc.xgmi.num_physical_nodes) {
3686                         dev_info(adev->dev, "Pending hive reset.\n");
3687                         adev->gmc.xgmi.pending_reset = true;
3688                         /* Only init the blocks the SMU needs to handle the reset */
3689                         for (i = 0; i < adev->num_ip_blocks; i++) {
3690                                 if (!adev->ip_blocks[i].status.valid)
3691                                         continue;
3692                                 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3693                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3694                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3695                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3696                                         DRM_DEBUG("IP %s disabled for hw_init.\n",
3697                                                 adev->ip_blocks[i].version->funcs->name);
3698                                         adev->ip_blocks[i].status.hw = true;
3699                                 }
3700                         }
3701                 } else {
3702                         r = amdgpu_asic_reset(adev);
3703                         if (r) {
3704                                 dev_err(adev->dev, "asic reset on init failed\n");
3705                                 goto failed;
3706                         }
3707                 }
3708         }
3709
3710         pci_enable_pcie_error_reporting(adev->pdev);
3711
3712         /* Post card if necessary */
3713         if (amdgpu_device_need_post(adev)) {
3714                 if (!adev->bios) {
3715                         dev_err(adev->dev, "no vBIOS found\n");
3716                         r = -EINVAL;
3717                         goto failed;
3718                 }
3719                 DRM_INFO("GPU posting now...\n");
3720                 r = amdgpu_device_asic_init(adev);
3721                 if (r) {
3722                         dev_err(adev->dev, "gpu post error!\n");
3723                         goto failed;
3724                 }
3725         }
3726
3727         if (adev->is_atom_fw) {
3728                 /* Initialize clocks */
3729                 r = amdgpu_atomfirmware_get_clock_info(adev);
3730                 if (r) {
3731                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3732                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3733                         goto failed;
3734                 }
3735         } else {
3736                 /* Initialize clocks */
3737                 r = amdgpu_atombios_get_clock_info(adev);
3738                 if (r) {
3739                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3740                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3741                         goto failed;
3742                 }
3743                 /* init i2c buses */
3744                 if (!amdgpu_device_has_dc_support(adev))
3745                         amdgpu_atombios_i2c_init(adev);
3746         }
3747
3748 fence_driver_init:
3749         /* Fence driver */
3750         r = amdgpu_fence_driver_sw_init(adev);
3751         if (r) {
3752                 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3753                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3754                 goto failed;
3755         }
3756
3757         /* init the mode config */
3758         drm_mode_config_init(adev_to_drm(adev));
3759
3760         r = amdgpu_device_ip_init(adev);
3761         if (r) {
3762                 /* failed in exclusive mode due to timeout */
3763                 if (amdgpu_sriov_vf(adev) &&
3764                     !amdgpu_sriov_runtime(adev) &&
3765                     amdgpu_virt_mmio_blocked(adev) &&
3766                     !amdgpu_virt_wait_reset(adev)) {
3767                         dev_err(adev->dev, "VF exclusive mode timeout\n");
3768                         /* Don't send request since VF is inactive. */
3769                         adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3770                         adev->virt.ops = NULL;
3771                         r = -EAGAIN;
3772                         goto release_ras_con;
3773                 }
3774                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3775                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3776                 goto release_ras_con;
3777         }
3778
3779         amdgpu_fence_driver_hw_init(adev);
3780
3781         dev_info(adev->dev,
3782                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3783                         adev->gfx.config.max_shader_engines,
3784                         adev->gfx.config.max_sh_per_se,
3785                         adev->gfx.config.max_cu_per_sh,
3786                         adev->gfx.cu_info.number);
3787
3788         adev->accel_working = true;
3789
3790         amdgpu_vm_check_compute_bug(adev);
3791
3792         /* Initialize the buffer migration limit. */
3793         if (amdgpu_moverate >= 0)
3794                 max_MBps = amdgpu_moverate;
3795         else
3796                 max_MBps = 8; /* Allow 8 MB/s. */
3797         /* Get a log2 for easy divisions. */
3798         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
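
        /*
         * Worked example (illustrative): with the default of 8 MB/s,
         * log2_max_MBps is ilog2(8) = 3, so later buffer-migration
         * accounting can replace a rate division with a shift by 3.
         */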
3799
3800         r = amdgpu_pm_sysfs_init(adev);
3801         if (r) {
3802                 adev->pm_sysfs_en = false;
3803                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3804         } else
3805                 adev->pm_sysfs_en = true;
3806
3807         r = amdgpu_ucode_sysfs_init(adev);
3808         if (r) {
3809                 adev->ucode_sysfs_en = false;
3810                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3811         } else
3812                 adev->ucode_sysfs_en = true;
3813
3814         r = amdgpu_psp_sysfs_init(adev);
3815         if (r) {
3816                 adev->psp_sysfs_en = false;
3817                 if (!amdgpu_sriov_vf(adev))
3818                         DRM_ERROR("Creating psp sysfs failed\n");
3819         } else
3820                 adev->psp_sysfs_en = true;
3821
3822         /*
3823          * Register the gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3824          * Otherwise the mgpu fan boost feature will be skipped because the
3825          * gpu instance count would come up short.
3826          */
3827         amdgpu_register_gpu_instance(adev);
3828
3829         /* enable clockgating, etc. after ib tests, etc. since some blocks require
3830          * explicit gating rather than handling it automatically.
3831          */
3832         if (!adev->gmc.xgmi.pending_reset) {
3833                 r = amdgpu_device_ip_late_init(adev);
3834                 if (r) {
3835                         dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3836                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3837                         goto release_ras_con;
3838                 }
3839                 /* must succeed. */
3840                 amdgpu_ras_resume(adev);
3841                 queue_delayed_work(system_wq, &adev->delayed_init_work,
3842                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3843         }
3844
3845         if (amdgpu_sriov_vf(adev))
3846                 flush_delayed_work(&adev->delayed_init_work);
3847
3848         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3849         if (r)
3850                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3851
3852         if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3853                 r = amdgpu_pmu_init(adev);
3854                 if (r)
3855                         dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3856         }
3856
3857         /* keep the stored pci config space at hand to restore after a sudden PCI error */
3858         if (amdgpu_device_cache_pci_state(adev->pdev))
3859                 pci_restore_state(pdev);
3860
3861         /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3862         /* this will fail for cards that aren't VGA class devices; just
3863          * ignore it */
3864         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3865                 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3866
3867         if (amdgpu_device_supports_px(ddev)) {
3868                 px = true;
3869                 vga_switcheroo_register_client(adev->pdev,
3870                                                &amdgpu_switcheroo_ops, px);
3871                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3872         }
3873
3874         if (adev->gmc.xgmi.pending_reset)
3875                 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3876                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3877
3878         amdgpu_device_check_iommu_direct_map(adev);
3879
3880         return 0;
3881
3882 release_ras_con:
3883         amdgpu_release_ras_context(adev);
3884
3885 failed:
3886         amdgpu_vf_error_trans_all(adev);
3887
3888         return r;
3889 }
3890
3891 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3892 {
3893
3894         /* Clear all CPU mappings pointing to this device */
3895         unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3896
3897         /* Unmap all mapped bars - Doorbell, registers and VRAM */
3898         amdgpu_device_doorbell_fini(adev);
3899
3900         iounmap(adev->rmmio);
3901         adev->rmmio = NULL;
3902         if (adev->mman.aper_base_kaddr)
3903                 iounmap(adev->mman.aper_base_kaddr);
3904         adev->mman.aper_base_kaddr = NULL;
3905
3906         /* Memory manager related */
3907         if (!adev->gmc.xgmi.connected_to_cpu) {
3908                 arch_phys_wc_del(adev->gmc.vram_mtrr);
3909                 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3910         }
3911 }
3912
3913 /**
3914  * amdgpu_device_fini_hw - tear down the driver
3915  *
3916  * @adev: amdgpu_device pointer
3917  *
3918  * Tear down the driver info (all asics).
3919  * Called at driver shutdown.
3920  */
3921 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3922 {
3923         dev_info(adev->dev, "amdgpu: finishing device.\n");
3924         flush_delayed_work(&adev->delayed_init_work);
3925         adev->shutdown = true;
3926
3927         /* make sure the IB tests have finished before entering exclusive mode
3928          * to avoid preemption during the IB tests
3929          */
3930         if (amdgpu_sriov_vf(adev)) {
3931                 amdgpu_virt_request_full_gpu(adev, false);
3932                 amdgpu_virt_fini_data_exchange(adev);
3933         }
3934
3935         /* disable all interrupts */
3936         amdgpu_irq_disable_all(adev);
3937         if (adev->mode_info.mode_config_initialized) {
3938                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3939                         drm_helper_force_disable_all(adev_to_drm(adev));
3940                 else
3941                         drm_atomic_helper_shutdown(adev_to_drm(adev));
3942         }
3943         amdgpu_fence_driver_hw_fini(adev);
3944
3945         if (adev->mman.initialized) {
3946                 flush_delayed_work(&adev->mman.bdev.wq);
3947                 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3948         }
3949
3950         if (adev->pm_sysfs_en)
3951                 amdgpu_pm_sysfs_fini(adev);
3952         if (adev->ucode_sysfs_en)
3953                 amdgpu_ucode_sysfs_fini(adev);
3954         if (adev->psp_sysfs_en)
3955                 amdgpu_psp_sysfs_fini(adev);
3956         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3957
3958         /* RAS features must be disabled before hw fini */
3959         amdgpu_ras_pre_fini(adev);
3960
3961         amdgpu_device_ip_fini_early(adev);
3962
3963         amdgpu_irq_fini_hw(adev);
3964
3965         if (adev->mman.initialized)
3966                 ttm_device_clear_dma_mappings(&adev->mman.bdev);
3967
3968         amdgpu_gart_dummy_page_fini(adev);
3969
3970         if (drm_dev_is_unplugged(adev_to_drm(adev)))
3971                 amdgpu_device_unmap_mmio(adev);
3972
3973 }
3974
3975 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3976 {
3977         int idx;
3978
3979         amdgpu_fence_driver_sw_fini(adev);
3980         amdgpu_device_ip_fini(adev);
3981         release_firmware(adev->firmware.gpu_info_fw);
3982         adev->firmware.gpu_info_fw = NULL;
3983         adev->accel_working = false;
3984
3985         amdgpu_reset_fini(adev);
3986
3987         /* free i2c buses */
3988         if (!amdgpu_device_has_dc_support(adev))
3989                 amdgpu_i2c_fini(adev);
3990
3991         if (amdgpu_emu_mode != 1)
3992                 amdgpu_atombios_fini(adev);
3993
3994         kfree(adev->bios);
3995         adev->bios = NULL;
3996         if (amdgpu_device_supports_px(adev_to_drm(adev))) {
3997                 vga_switcheroo_unregister_client(adev->pdev);
3998                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3999         }
4000         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4001                 vga_client_unregister(adev->pdev);
4002
4003         if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4004
4005                 iounmap(adev->rmmio);
4006                 adev->rmmio = NULL;
4007                 amdgpu_device_doorbell_fini(adev);
4008                 drm_dev_exit(idx);
4009         }
4010
4011         if (IS_ENABLED(CONFIG_PERF_EVENTS))
4012                 amdgpu_pmu_fini(adev);
4013         if (adev->mman.discovery_bin)
4014                 amdgpu_discovery_fini(adev);
4015
4016         amdgpu_reset_put_reset_domain(adev->reset_domain);
4017         adev->reset_domain = NULL;
4018
4019         kfree(adev->pci_state);
4020
4021 }
4022
4023 /**
4024  * amdgpu_device_evict_resources - evict device resources
4025  * @adev: amdgpu device object
4026  *
4027  * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4028  * of the vram memory type. Mainly used for evicting device resources
4029  * at suspend time.
4030  *
4031  */
4032 static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
4033 {
4034         /* No need to evict vram on APUs for suspend to ram or s2idle */
4035         if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4036                 return;
4037
4038         if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
4039                 DRM_WARN("evicting device resources failed\n");
4040
4041 }
4042
4043 /*
4044  * Suspend & resume.
4045  */
4046 /**
4047  * amdgpu_device_suspend - initiate device suspend
4048  *
4049  * @dev: drm dev pointer
4050  * @fbcon: notify the fbdev of suspend
4051  *
4052  * Puts the hw in the suspend state (all asics).
4053  * Returns 0 for success or an error on failure.
4054  * Called at driver suspend.
4055  */
4056 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4057 {
4058         struct amdgpu_device *adev = drm_to_adev(dev);
4059
4060         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4061                 return 0;
4062
4063         adev->in_suspend = true;
4064
4065         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4066                 DRM_WARN("smart shift update failed\n");
4067
4068         drm_kms_helper_poll_disable(dev);
4069
4070         if (fbcon)
4071                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4072
4073         cancel_delayed_work_sync(&adev->delayed_init_work);
4074
4075         amdgpu_ras_suspend(adev);
4076
4077         amdgpu_device_ip_suspend_phase1(adev);
4078
4079         if (!adev->in_s0ix)
4080                 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4081
4082         amdgpu_device_evict_resources(adev);
4083
4084         amdgpu_fence_driver_hw_fini(adev);
4085
4086         amdgpu_device_ip_suspend_phase2(adev);
4087
4088         return 0;
4089 }
4090
4091 /**
4092  * amdgpu_device_resume - initiate device resume
4093  *
4094  * @dev: drm dev pointer
4095  * @fbcon: notify the fbdev of resume
4096  *
4097  * Bring the hw back to operating state (all asics).
4098  * Returns 0 for success or an error on failure.
4099  * Called at driver resume.
4100  */
4101 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4102 {
4103         struct amdgpu_device *adev = drm_to_adev(dev);
4104         int r = 0;
4105
4106         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4107                 return 0;
4108
4109         if (adev->in_s0ix)
4110                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4111
4112         /* post card */
4113         if (amdgpu_device_need_post(adev)) {
4114                 r = amdgpu_device_asic_init(adev);
4115                 if (r)
4116                         dev_err(adev->dev, "amdgpu asic init failed\n");
4117         }
4118
4119         r = amdgpu_device_ip_resume(adev);
4120         if (r) {
4121                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4122                 return r;
4123         }
4124         amdgpu_fence_driver_hw_init(adev);
4125
4126         r = amdgpu_device_ip_late_init(adev);
4127         if (r)
4128                 return r;
4129
4130         queue_delayed_work(system_wq, &adev->delayed_init_work,
4131                            msecs_to_jiffies(AMDGPU_RESUME_MS));
4132
4133         if (!adev->in_s0ix) {
4134                 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4135                 if (r)
4136                         return r;
4137         }
4138
4139         /* Make sure the IB tests have been flushed */
4140         flush_delayed_work(&adev->delayed_init_work);
4141
4142         if (fbcon)
4143                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4144
4145         drm_kms_helper_poll_enable(dev);
4146
4147         amdgpu_ras_resume(adev);
4148
4149         /*
4150          * Most of the connector probing functions try to acquire runtime pm
4151          * refs to ensure that the GPU is powered on when connector polling is
4152          * performed. Since we're calling this from a runtime PM callback,
4153          * trying to acquire rpm refs will cause us to deadlock.
4154          *
4155          * Since we're guaranteed to be holding the rpm lock, it's safe to
4156          * temporarily disable the rpm helpers so this doesn't deadlock us.
4157          */
4158 #ifdef CONFIG_PM
4159         dev->dev->power.disable_depth++;
4160 #endif
4161         if (!amdgpu_device_has_dc_support(adev))
4162                 drm_helper_hpd_irq_event(dev);
4163         else
4164                 drm_kms_helper_hotplug_event(dev);
4165 #ifdef CONFIG_PM
4166         dev->dev->power.disable_depth--;
4167 #endif
4168         adev->in_suspend = false;
4169
4170         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4171                 DRM_WARN("smart shift update failed\n");
4172
4173         return 0;
4174 }
4175
4176 /**
4177  * amdgpu_device_ip_check_soft_reset - check if any hardware IPs are hung
4178  *
4179  * @adev: amdgpu_device pointer
4180  *
4181  * The list of all the hardware IPs that make up the asic is walked and
4182  * the check_soft_reset callbacks are run.  check_soft_reset determines
4183  * if the asic is still hung or not.
4184  * Returns true if any of the IPs are still in a hung state, false if not.
4185  */
4186 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4187 {
4188         int i;
4189         bool asic_hang = false;
4190
4191         if (amdgpu_sriov_vf(adev))
4192                 return true;
4193
4194         if (amdgpu_asic_need_full_reset(adev))
4195                 return true;
4196
4197         for (i = 0; i < adev->num_ip_blocks; i++) {
4198                 if (!adev->ip_blocks[i].status.valid)
4199                         continue;
4200                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4201                         adev->ip_blocks[i].status.hang =
4202                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4203                 if (adev->ip_blocks[i].status.hang) {
4204                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4205                         asic_hang = true;
4206                 }
4207         }
4208         return asic_hang;
4209 }
4210
4211 /**
4212  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4213  *
4214  * @adev: amdgpu_device pointer
4215  *
4216  * The list of all the hardware IPs that make up the asic is walked and the
4217  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4218  * handles any IP specific hardware or software state changes that are
4219  * necessary for a soft reset to succeed.
4220  * Returns 0 on success, negative error code on failure.
4221  */
4222 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4223 {
4224         int i, r = 0;
4225
4226         for (i = 0; i < adev->num_ip_blocks; i++) {
4227                 if (!adev->ip_blocks[i].status.valid)
4228                         continue;
4229                 if (adev->ip_blocks[i].status.hang &&
4230                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4231                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4232                         if (r)
4233                                 return r;
4234                 }
4235         }
4236
4237         return 0;
4238 }
4239
4240 /**
4241  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4242  *
4243  * @adev: amdgpu_device pointer
4244  *
4245  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4246  * reset is necessary to recover.
4247  * Returns true if a full asic reset is required, false if not.
4248  */
4249 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4250 {
4251         int i;
4252
4253         if (amdgpu_asic_need_full_reset(adev))
4254                 return true;
4255
4256         for (i = 0; i < adev->num_ip_blocks; i++) {
4257                 if (!adev->ip_blocks[i].status.valid)
4258                         continue;
4259                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4260                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4261                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4262                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4263                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) {
4264                         if (adev->ip_blocks[i].status.hang) {
4265                                 dev_info(adev->dev, "Some blocks need a full reset!\n");
4266                                 return true;
4267                         }
4268                 }
4269         }
4270         return false;
4271 }
4272
4273 /**
4274  * amdgpu_device_ip_soft_reset - do a soft reset
4275  *
4276  * @adev: amdgpu_device pointer
4277  *
4278  * The list of all the hardware IPs that make up the asic is walked and the
4279  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4280  * IP specific hardware or software state changes that are necessary to soft
4281  * reset the IP.
4282  * Returns 0 on success, negative error code on failure.
4283  */
4284 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4285 {
4286         int i, r = 0;
4287
4288         for (i = 0; i < adev->num_ip_blocks; i++) {
4289                 if (!adev->ip_blocks[i].status.valid)
4290                         continue;
4291                 if (adev->ip_blocks[i].status.hang &&
4292                     adev->ip_blocks[i].version->funcs->soft_reset) {
4293                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4294                         if (r)
4295                                 return r;
4296                 }
4297         }
4298
4299         return 0;
4300 }
4301
4302 /**
4303  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4304  *
4305  * @adev: amdgpu_device pointer
4306  *
4307  * The list of all the hardware IPs that make up the asic is walked and the
4308  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4309  * handles any IP specific hardware or software state changes that are
4310  * necessary after the IP has been soft reset.
4311  * Returns 0 on success, negative error code on failure.
4312  */
4313 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4314 {
4315         int i, r = 0;
4316
4317         for (i = 0; i < adev->num_ip_blocks; i++) {
4318                 if (!adev->ip_blocks[i].status.valid)
4319                         continue;
4320                 if (adev->ip_blocks[i].status.hang &&
4321                     adev->ip_blocks[i].version->funcs->post_soft_reset)
4322                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4323                 if (r)
4324                         return r;
4325         }
4326
4327         return 0;
4328 }
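
/*
 * Illustrative sketch (hypothetical, names invented): an IP block opts
 * into the soft-reset flow driven by the three walkers above by filling
 * in the matching callbacks of its amd_ip_funcs table, e.g.:
 *
 *	static const struct amd_ip_funcs example_ip_funcs = {
 *		.pre_soft_reset  = example_ip_pre_soft_reset,
 *		.soft_reset      = example_ip_soft_reset,
 *		.post_soft_reset = example_ip_post_soft_reset,
 *	};
 *
 * Blocks that leave a callback NULL are simply skipped for that phase.
 */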
4329
4330 /**
4331  * amdgpu_device_recover_vram - Recover some VRAM contents
4332  *
4333  * @adev: amdgpu_device pointer
4334  *
4335  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4336  * restore things like GPUVM page tables after a GPU reset where
4337  * the contents of VRAM might be lost.
4338  *
4339  * Returns:
4340  * 0 on success, negative error code on failure.
4341  */
4342 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4343 {
4344         struct dma_fence *fence = NULL, *next = NULL;
4345         struct amdgpu_bo *shadow;
4346         struct amdgpu_bo_vm *vmbo;
4347         long r = 1, tmo;
4348
4349         if (amdgpu_sriov_runtime(adev))
4350                 tmo = msecs_to_jiffies(8000);
4351         else
4352                 tmo = msecs_to_jiffies(100);
4353
4354         dev_info(adev->dev, "recover vram bo from shadow start\n");
4355         mutex_lock(&adev->shadow_list_lock);
4356         list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4357                 shadow = &vmbo->bo;
4358                 /* No need to recover an evicted BO */
4359                 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4360                     shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4361                     shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4362                         continue;
4363
4364                 r = amdgpu_bo_restore_shadow(shadow, &next);
4365                 if (r)
4366                         break;
4367
4368                 if (fence) {
4369                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4370                         dma_fence_put(fence);
4371                         fence = next;
4372                         if (tmo == 0) {
4373                                 r = -ETIMEDOUT;
4374                                 break;
4375                         } else if (tmo < 0) {
4376                                 r = tmo;
4377                                 break;
4378                         }
4379                 } else {
4380                         fence = next;
4381                 }
4382         }
4383         mutex_unlock(&adev->shadow_list_lock);
4384
4385         if (fence)
4386                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4387         dma_fence_put(fence);
4388
4389         if (r < 0 || tmo <= 0) {
4390                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4391                 return -EIO;
4392         }
4393
4394         dev_info(adev->dev, "recover vram bo from shadow done\n");
4395         return 0;
4396 }
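
/*
 * Note on the waits above: dma_fence_wait_timeout() returns the
 * remaining timeout in jiffies (> 0) when the fence signals, 0 on
 * timeout and a negative error code on failure, which is why the
 * leftover budget in 'tmo' is threaded from one wait into the next.
 */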
4397
4398
4399 /**
4400  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4401  *
4402  * @adev: amdgpu_device pointer
4403  * @from_hypervisor: request from hypervisor
4404  *
4405  * Do a VF FLR and reinitialize the ASIC.
4406  * Returns 0 on success, negative error code on failure.
4407  */
4408 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4409                                      bool from_hypervisor)
4410 {
4411         int r;
4412         struct amdgpu_hive_info *hive = NULL;
4413         int retry_limit = 0;
4414
4415 retry:
4416         amdgpu_amdkfd_pre_reset(adev);
4417
4418         if (from_hypervisor)
4419                 r = amdgpu_virt_request_full_gpu(adev, true);
4420         else
4421                 r = amdgpu_virt_reset_gpu(adev);
4422         if (r)
4423                 return r;
4424
4425         /* Resume IP prior to SMC */
4426         r = amdgpu_device_ip_reinit_early_sriov(adev);
4427         if (r)
4428                 goto error;
4429
4430         amdgpu_virt_init_data_exchange(adev);
4431
4432         r = amdgpu_device_fw_loading(adev);
4433         if (r)
4434                 goto error;
4435
4436         /* now we are okay to resume SMC/CP/SDMA */
4437         r = amdgpu_device_ip_reinit_late_sriov(adev);
4438         if (r)
4439                 goto error;
4440
4441         hive = amdgpu_get_xgmi_hive(adev);
4442         /* Update PSP FW topology after reset */
4443         if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4444                 r = amdgpu_xgmi_update_topology(hive, adev);
4445
4446         if (hive)
4447                 amdgpu_put_xgmi_hive(hive);
4448
4449         if (!r) {
4450                 amdgpu_irq_gpu_reset_resume_helper(adev);
4451                 r = amdgpu_ib_ring_tests(adev);
4452
4453                 amdgpu_amdkfd_post_reset(adev);
4454         }
4455
4456 error:
4457         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4458                 amdgpu_inc_vram_lost(adev);
4459                 r = amdgpu_device_recover_vram(adev);
4460         }
4461         amdgpu_virt_release_full_gpu(adev, true);
4462
4463         if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4464                 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4465                         retry_limit++;
4466                         goto retry;
4467                 } else
4468                         DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4469         }
4470
4471         return r;
4472 }
4473
4474 /**
4475  * amdgpu_device_has_job_running - check if there is any job in the pending list
4476  *
4477  * @adev: amdgpu_device pointer
4478  *
4479  * Check if any scheduler ring still has a job in its pending list.
4480  */
4481 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4482 {
4483         int i;
4484         struct drm_sched_job *job;
4485
4486         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4487                 struct amdgpu_ring *ring = adev->rings[i];
4488
4489                 if (!ring || !ring->sched.thread)
4490                         continue;
4491
4492                 spin_lock(&ring->sched.job_list_lock);
4493                 job = list_first_entry_or_null(&ring->sched.pending_list,
4494                                                struct drm_sched_job, list);
4495                 spin_unlock(&ring->sched.job_list_lock);
4496                 if (job)
4497                         return true;
4498         }
4499         return false;
4500 }
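
/*
 * Illustrative use (hypothetical caller, not part of the driver):
 * polling for ring idleness before a driver-initiated teardown.
 *
 *	while (amdgpu_device_has_job_running(adev))
 *		msleep(10);
 */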
4501
4502 /**
4503  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4504  *
4505  * @adev: amdgpu_device pointer
4506  *
4507  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4508  * a hung GPU.
4509  */
4510 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4511 {
4512         if (!amdgpu_device_ip_check_soft_reset(adev)) {
4513                 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4514                 return false;
4515         }
4516
4517         if (amdgpu_gpu_recovery == 0)
4518                 goto disabled;
4519
4520         if (amdgpu_sriov_vf(adev))
4521                 return true;
4522
4523         if (amdgpu_gpu_recovery == -1) {
4524                 switch (adev->asic_type) {
4525 #ifdef CONFIG_DRM_AMDGPU_SI
4526                 case CHIP_VERDE:
4527                 case CHIP_TAHITI:
4528                 case CHIP_PITCAIRN:
4529                 case CHIP_OLAND:
4530                 case CHIP_HAINAN:
4531 #endif
4532 #ifdef CONFIG_DRM_AMDGPU_CIK
4533                 case CHIP_KAVERI:
4534                 case CHIP_KABINI:
4535                 case CHIP_MULLINS:
4536 #endif
4537                 case CHIP_CARRIZO:
4538                 case CHIP_STONEY:
4539                 case CHIP_CYAN_SKILLFISH:
4540                         goto disabled;
4541                 default:
4542                         break;
4543                 }
4544         }
4545
4546         return true;
4547
4548 disabled:
4549         dev_info(adev->dev, "GPU recovery disabled.\n");
4550         return false;
4551 }
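
/*
 * Summary of the amdgpu_gpu_recovery module parameter as consumed above
 * and in amdgpu_device_gpu_recover() below:
 *	 0 - recovery disabled
 *	 1 - recovery enabled
 *	-1 - auto: enabled except on the ASICs listed above
 *	 2 - recovery enabled, plus the extra guilty-job recheck pass
 */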
4552
4553 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4554 {
4555         u32 i;
4556         int ret = 0;
4557
4558         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4559
4560         dev_info(adev->dev, "GPU mode1 reset\n");
4561
4562         /* disable BM */
4563         pci_clear_master(adev->pdev);
4564
4565         amdgpu_device_cache_pci_state(adev->pdev);
4566
4567         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4568                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4569                 ret = amdgpu_dpm_mode1_reset(adev);
4570         } else {
4571                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4572                 ret = psp_gpu_reset(adev);
4573         }
4574
4575         if (ret)
4576                 dev_err(adev->dev, "GPU mode1 reset failed\n");
4577
4578         amdgpu_device_load_pci_state(adev->pdev);
4579
4580         /* wait for asic to come out of reset */
4581         for (i = 0; i < adev->usec_timeout; i++) {
4582                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4583
4584                 if (memsize != 0xffffffff)
4585                         break;
4586                 udelay(1);
4587         }
4588
4589         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4590         return ret;
4591 }
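
/*
 * Note on the wait loop above: while the ASIC is still in reset, MMIO
 * reads come back as all-ones, so a memsize value other than 0xffffffff
 * is taken as the ASIC having come back to life.
 */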
4592
4593 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4594                                  struct amdgpu_reset_context *reset_context)
4595 {
4596         int i, r = 0;
4597         struct amdgpu_job *job = NULL;
4598         bool need_full_reset =
4599                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4600
4601         if (reset_context->reset_req_dev == adev)
4602                 job = reset_context->job;
4603
4604         if (amdgpu_sriov_vf(adev)) {
4605                 /* stop the data exchange thread */
4606                 amdgpu_virt_fini_data_exchange(adev);
4607         }
4608
4609         amdgpu_fence_driver_isr_toggle(adev, true);
4610
4611         /* block all schedulers and reset given job's ring */
4612         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4613                 struct amdgpu_ring *ring = adev->rings[i];
4614
4615                 if (!ring || !ring->sched.thread)
4616                         continue;
4617
4618                 /* Clear job fences from the fence driver so that force_completion
4619                  * only signals the null and VM flush fences left in it */
4620                 amdgpu_fence_driver_clear_job_fences(ring);
4621
4622                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4623                 amdgpu_fence_driver_force_completion(ring);
4624         }
4625
4626         amdgpu_fence_driver_isr_toggle(adev, false);
4627
4628         if (job && job->vm)
4629                 drm_sched_increase_karma(&job->base);
4630
4631         r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4632         /* If reset handler not implemented, continue; otherwise return */
4633         if (r == -ENOSYS)
4634                 r = 0;
4635         else
4636                 return r;
4637
4638         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4639         if (!amdgpu_sriov_vf(adev)) {
4640
4641                 if (!need_full_reset)
4642                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4643
4644                 if (!need_full_reset) {
4645                         amdgpu_device_ip_pre_soft_reset(adev);
4646                         r = amdgpu_device_ip_soft_reset(adev);
4647                         amdgpu_device_ip_post_soft_reset(adev);
4648                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4649                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4650                                 need_full_reset = true;
4651                         }
4652                 }
4653
4654                 if (need_full_reset) {
4655                         r = amdgpu_device_ip_suspend(adev);
4656                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4657                 } else {
4658                         clear_bit(AMDGPU_NEED_FULL_RESET,
4659                                   &reset_context->flags);
4660                 }
4661         }
4662
4663         return r;
4664 }
4665
4666 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4667 {
4668         int i;
4669
4670         lockdep_assert_held(&adev->reset_domain->sem);
4671
4672         for (i = 0; i < adev->num_regs; i++) {
4673                 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4674                 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4675                                              adev->reset_dump_reg_value[i]);
4676         }
4677
4678         return 0;
4679 }
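
/*
 * The register list sampled above is empty by default and is expected
 * to be populated at runtime (the driver exposes a debugfs interface
 * for this). A hypothetical in-kernel writer, shown only for
 * illustration, would amount to:
 *
 *	adev->reset_dump_reg_list[adev->num_regs++] = reg_offset;
 *
 * where reg_offset is an assumed MMIO dword offset.
 */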
4680
4681 #ifdef CONFIG_DEV_COREDUMP
4682 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4683                 size_t count, void *data, size_t datalen)
4684 {
4685         struct drm_printer p;
4686         struct amdgpu_device *adev = data;
4687         struct drm_print_iterator iter;
4688         int i;
4689
4690         iter.data = buffer;
4691         iter.offset = 0;
4692         iter.start = offset;
4693         iter.remain = count;
4694
4695         p = drm_coredump_printer(&iter);
4696
4697         drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4698         drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4699         drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4700         drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4701         if (adev->reset_task_info.pid)
4702                 drm_printf(&p, "process_name: %s PID: %d\n",
4703                            adev->reset_task_info.process_name,
4704                            adev->reset_task_info.pid);
4705
4706         if (adev->reset_vram_lost)
4707                 drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4708         if (adev->num_regs) {
4709                 drm_printf(&p, "AMDGPU register dumps:\nOffset:     Value:\n");
4710
4711                 for (i = 0; i < adev->num_regs; i++)
4712                         drm_printf(&p, "0x%08x: 0x%08x\n",
4713                                    adev->reset_dump_reg_list[i],
4714                                    adev->reset_dump_reg_value[i]);
4715         }
4716
4717         return count - iter.remain;
4718 }
4719
4720 static void amdgpu_devcoredump_free(void *data)
4721 {
4722 }
4723
4724 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4725 {
4726         struct drm_device *dev = adev_to_drm(adev);
4727
4728         ktime_get_ts64(&adev->reset_time);
4729         dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
4730                       amdgpu_devcoredump_read, amdgpu_devcoredump_free);
4731 }
4732 #endif
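
/*
 * The dump registered above surfaces through the devcoredump class:
 * userspace typically reads it from /sys/class/devcoredump/devcd<N>/data
 * and writes to the same file to dismiss it.
 */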
4733
4734 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4735                          struct amdgpu_reset_context *reset_context)
4736 {
4737         struct amdgpu_device *tmp_adev = NULL;
4738         bool need_full_reset, skip_hw_reset, vram_lost = false;
4739         int r = 0;
4740
4741         /* Try reset handler method first */
4742         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4743                                     reset_list);
4744         amdgpu_reset_reg_dumps(tmp_adev);
4745
4746         reset_context->reset_device_list = device_list_handle;
4747         r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4748         /* If reset handler not implemented, continue; otherwise return */
4749         if (r == -ENOSYS)
4750                 r = 0;
4751         else
4752                 return r;
4753
4754         /* Reset handler not implemented, use the default method */
4755         need_full_reset =
4756                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4757         skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4758
4759         /*
4760          * ASIC reset has to be done on all XGMI hive nodes ASAP
4761          * to allow proper link negotiation in the FW (within 1 sec)
4762          */
4763         if (!skip_hw_reset && need_full_reset) {
4764                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4765                         /* For XGMI run all resets in parallel to speed up the process */
4766                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4767                                 tmp_adev->gmc.xgmi.pending_reset = false;
4768                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4769                                         r = -EALREADY;
4770                         } else
4771                                 r = amdgpu_asic_reset(tmp_adev);
4772
4773                         if (r) {
4774                                 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4775                                          r, adev_to_drm(tmp_adev)->unique);
4776                                 break;
4777                         }
4778                 }
4779
4780                 /* For XGMI wait for all resets to complete before proceeding */
4781                 if (!r) {
4782                         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4783                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4784                                         flush_work(&tmp_adev->xgmi_reset_work);
4785                                         r = tmp_adev->asic_reset_res;
4786                                         if (r)
4787                                                 break;
4788                                 }
4789                         }
4790                 }
4791         }
4792
4793         if (!r && amdgpu_ras_intr_triggered()) {
4794                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4795                         if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4796                             tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4797                                 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4798                 }
4799
4800                 amdgpu_ras_intr_cleared();
4801         }
4802
4803         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4804                 if (need_full_reset) {
4805                         /* post card */
4806                         r = amdgpu_device_asic_init(tmp_adev);
4807                         if (r) {
4808                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
4809                         } else {
4810                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4811                                 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4812                                 if (r)
4813                                         goto out;
4814
4815                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4816                                 if (r)
4817                                         goto out;
4818
4819                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4820 #ifdef CONFIG_DEV_COREDUMP
4821                                 tmp_adev->reset_vram_lost = vram_lost;
4822                                 memset(&tmp_adev->reset_task_info, 0,
4823                                                 sizeof(tmp_adev->reset_task_info));
4824                                 if (reset_context->job && reset_context->job->vm)
4825                                         tmp_adev->reset_task_info =
4826                                                 reset_context->job->vm->task_info;
4827                                 amdgpu_reset_capture_coredumpm(tmp_adev);
4828 #endif
4829                                 if (vram_lost) {
4830                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
4831                                         amdgpu_inc_vram_lost(tmp_adev);
4832                                 }
4833
4834                                 r = amdgpu_device_fw_loading(tmp_adev);
4835                                 if (r)
4836                                         return r;
4837
4838                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4839                                 if (r)
4840                                         goto out;
4841
4842                                 if (vram_lost)
4843                                         amdgpu_device_fill_reset_magic(tmp_adev);
4844
4845                                 /*
4846                                  * Add this ASIC back as tracked since the reset
4847                                  * has already completed successfully.
4848                                  */
4849                                 amdgpu_register_gpu_instance(tmp_adev);
4850
4851                                 if (!reset_context->hive &&
4852                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4853                                         amdgpu_xgmi_add_device(tmp_adev);
4854
4855                                 r = amdgpu_device_ip_late_init(tmp_adev);
4856                                 if (r)
4857                                         goto out;
4858
4859                                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4860
4861                                 /*
4862                                  * The GPU enters a bad state once the number of
4863                                  * faulty pages flagged by ECC reaches the
4864                                  * threshold, and RAS recovery is scheduled next.
4865                                  * So add a check here to break recovery if the
4866                                  * bad page threshold is indeed exceeded, and
4867                                  * remind the user to retire this GPU or set a
4868                                  * bigger bad_page_threshold value the next time
4869                                  * the driver is probed.
4870                                  */
4871                                 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4872                                         /* must succeed. */
4873                                         amdgpu_ras_resume(tmp_adev);
4874                                 } else {
4875                                         r = -EINVAL;
4876                                         goto out;
4877                                 }
4878
4879                                 /* Update PSP FW topology after reset */
4880                                 if (reset_context->hive &&
4881                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4882                                         r = amdgpu_xgmi_update_topology(
4883                                                 reset_context->hive, tmp_adev);
4884                         }
4885                 }
4886
4887 out:
4888                 if (!r) {
4889                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4890                         r = amdgpu_ib_ring_tests(tmp_adev);
4891                         if (r) {
4892                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4893                                 need_full_reset = true;
4894                                 r = -EAGAIN;
4895                                 goto end;
4896                         }
4897                 }
4898
4899                 if (!r)
4900                         r = amdgpu_device_recover_vram(tmp_adev);
4901                 else
4902                         tmp_adev->asic_reset_res = r;
4903         }
4904
4905 end:
4906         if (need_full_reset)
4907                 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4908         else
4909                 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4910         return r;
4911 }
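
/*
 * Callers treat -EAGAIN from amdgpu_do_asic_reset() as a request to
 * retry: AMDGPU_NEED_FULL_RESET is left set in the reset context at the
 * 'end' label above, so the next pass goes straight for a full reset
 * instead of attempting another soft reset.
 */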
4912
4913 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
4914 {
4915
4916         switch (amdgpu_asic_reset_method(adev)) {
4917         case AMD_RESET_METHOD_MODE1:
4918                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4919                 break;
4920         case AMD_RESET_METHOD_MODE2:
4921                 adev->mp1_state = PP_MP1_STATE_RESET;
4922                 break;
4923         default:
4924                 adev->mp1_state = PP_MP1_STATE_NONE;
4925                 break;
4926         }
4927 }
4928
4929 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
4930 {
4931         amdgpu_vf_error_trans_all(adev);
4932         adev->mp1_state = PP_MP1_STATE_NONE;
4933 }
4934
4935 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4936 {
4937         struct pci_dev *p = NULL;
4938
4939         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4940                         adev->pdev->bus->number, 1);
4941         if (p) {
4942                 pm_runtime_enable(&(p->dev));
4943                 pm_runtime_resume(&(p->dev));
4944         }
4945 }
4946
4947 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4948 {
4949         enum amd_reset_method reset_method;
4950         struct pci_dev *p = NULL;
4951         u64 expires;
4952
4953         /*
4954          * For now, only BACO and mode1 reset are confirmed to suffer
4955          * from the audio issue if audio is not properly suspended.
4956          */
4957         reset_method = amdgpu_asic_reset_method(adev);
4958         if ((reset_method != AMD_RESET_METHOD_BACO) &&
4959              (reset_method != AMD_RESET_METHOD_MODE1))
4960                 return -EINVAL;
4961
4962         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4963                         adev->pdev->bus->number, 1);
4964         if (!p)
4965                 return -ENODEV;
4966
4967         expires = pm_runtime_autosuspend_expiration(&(p->dev));
4968         if (!expires)
4969                 /*
4970                  * If we cannot get the audio device autosuspend delay,
4971                  * use a fixed 4s interval. Since 3s is the audio
4972                  * controller's default autosuspend delay setting,
4973                  * the 4s used here is guaranteed to cover it.
4974                  */
4975                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4976
4977         while (!pm_runtime_status_suspended(&(p->dev))) {
4978                 if (!pm_runtime_suspend(&(p->dev)))
4979                         break;
4980
4981                 if (expires < ktime_get_mono_fast_ns()) {
4982                         dev_warn(adev->dev, "failed to suspend display audio\n");
4983                         /* TODO: abort the succeeding gpu reset? */
4984                         return -ETIMEDOUT;
4985                 }
4986         }
4987
4988         pm_runtime_disable(&(p->dev));
4989
4990         return 0;
4991 }
4992
4993 static void amdgpu_device_recheck_guilty_jobs(
4994         struct amdgpu_device *adev, struct list_head *device_list_handle,
4995         struct amdgpu_reset_context *reset_context)
4996 {
4997         int i, r = 0;
4998
4999         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5000                 struct amdgpu_ring *ring = adev->rings[i];
5001                 int ret = 0;
5002                 struct drm_sched_job *s_job;
5003
5004                 if (!ring || !ring->sched.thread)
5005                         continue;
5006
5007                 s_job = list_first_entry_or_null(&ring->sched.pending_list,
5008                                 struct drm_sched_job, list);
5009                 if (s_job == NULL)
5010                         continue;
5011
5012                 /* clear the job's guilty flag and rely on the following step to decide the real one */
5013                 drm_sched_reset_karma(s_job);
5014                 drm_sched_resubmit_jobs_ext(&ring->sched, 1);
5015
5016                 if (!s_job->s_fence->parent) {
5017                         DRM_WARN("Failed to get a HW fence for job!");
5018                         continue;
5019                 }
5020
5021                 ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
5022                 if (ret == 0) { /* timeout */
5023                         DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
5024                                                 ring->sched.name, s_job->id);
5025
5026
5027                         amdgpu_fence_driver_isr_toggle(adev, true);
5028
5029                         /* Clear this failed job from fence array */
5030                         amdgpu_fence_driver_clear_job_fences(ring);
5031
5032                         amdgpu_fence_driver_isr_toggle(adev, false);
5033
5034                         /* Since the job won't signal and we are going for
5035                          * another resubmit, drop this parent pointer
5036                          */
5037                         dma_fence_put(s_job->s_fence->parent);
5038                         s_job->s_fence->parent = NULL;
5039
5040                         /* set guilty */
5041                         drm_sched_increase_karma(s_job);
5042 retry:
5043                         /* do hw reset */
5044                         if (amdgpu_sriov_vf(adev)) {
5045                                 amdgpu_virt_fini_data_exchange(adev);
5046                                 r = amdgpu_device_reset_sriov(adev, false);
5047                                 if (r)
5048                                         adev->asic_reset_res = r;
5049                         } else {
5050                                 clear_bit(AMDGPU_SKIP_HW_RESET,
5051                                           &reset_context->flags);
5052                                 r = amdgpu_do_asic_reset(device_list_handle,
5053                                                          reset_context);
5054                                 if (r == -EAGAIN)
5055                                         goto retry;
5056                         }
5057
5058                         /*
5059                          * add reset counter so that the following
5060                          * resubmitted job could flush vmid
5061                          */
5062                         atomic_inc(&adev->gpu_reset_counter);
5063                         continue;
5064                 }
5065
5066                 /* got the hw fence, signal finished fence */
5067                 atomic_dec(ring->sched.score);
5068                 dma_fence_get(&s_job->s_fence->finished);
5069                 dma_fence_signal(&s_job->s_fence->finished);
5070                 dma_fence_put(&s_job->s_fence->finished);
5071
5072                 /* remove node from list and free the job */
5073                 spin_lock(&ring->sched.job_list_lock);
5074                 list_del_init(&s_job->list);
5075                 spin_unlock(&ring->sched.job_list_lock);
5076                 ring->sched.ops->free_job(s_job);
5077         }
5078 }
5079
5080 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5081 {
5082         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5083
5084 #if defined(CONFIG_DEBUG_FS)
5085         if (!amdgpu_sriov_vf(adev))
5086                 cancel_work(&adev->reset_work);
5087 #endif
5088
5089         if (adev->kfd.dev)
5090                 cancel_work(&adev->kfd.reset_work);
5091
5092         if (amdgpu_sriov_vf(adev))
5093                 cancel_work(&adev->virt.flr_work);
5094
5095         if (con && adev->ras_enabled)
5096                 cancel_work(&con->recovery_work);
5097
5098 }
5099
5100
5101 /**
5102  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5103  *
5104  * @adev: amdgpu_device pointer
5105  * @job: the job which triggered the hang
 * @reset_context: amdgpu reset context pointer
5106  *
5107  * Attempt to reset the GPU if it has hung (all asics).
5108  * Attempt to do a soft reset or a full reset and reinitialize the ASIC.
5109  * Returns 0 for success or an error on failure.
5110  */
5111
5112 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5113                               struct amdgpu_job *job,
5114                               struct amdgpu_reset_context *reset_context)
5115 {
5116         struct list_head device_list, *device_list_handle =  NULL;
5117         bool job_signaled = false;
5118         struct amdgpu_hive_info *hive = NULL;
5119         struct amdgpu_device *tmp_adev = NULL;
5120         int i, r = 0;
5121         bool need_emergency_restart = false;
5122         bool audio_suspended = false;
5123         int tmp_vram_lost_counter;
5124
5125         /*
5126          * Special case: RAS triggered and full reset isn't supported
5127          */
5128         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5129
5130         /*
5131          * Flush RAM to disk so that after reboot
5132          * the user can read the log and see why the system rebooted.
5133          */
5134         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5135                 DRM_WARN("Emergency reboot.");
5136
5137                 ksys_sync_helper();
5138                 emergency_restart();
5139         }
5140
5141         dev_info(adev->dev, "GPU %s begin!\n",
5142                 need_emergency_restart ? "jobs stop":"reset");
5143
5144         if (!amdgpu_sriov_vf(adev))
5145                 hive = amdgpu_get_xgmi_hive(adev);
5146         if (hive)
5147                 mutex_lock(&hive->hive_lock);
5148
5149         reset_context->job = job;
5150         reset_context->hive = hive;
5151         /*
5152          * Build list of devices to reset.
5153          * In case we are in XGMI hive mode, resort the device list
5154          * to put adev in the 1st position.
5155          */
5156         INIT_LIST_HEAD(&device_list);
5157         if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5158                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
5159                         list_add_tail(&tmp_adev->reset_list, &device_list);
5160                 if (!list_is_first(&adev->reset_list, &device_list))
5161                         list_rotate_to_front(&adev->reset_list, &device_list);
5162                 device_list_handle = &device_list;
5163         } else {
5164                 list_add_tail(&adev->reset_list, &device_list);
5165                 device_list_handle = &device_list;
5166         }
5167
5168         /* We need to lock reset domain only once both for XGMI and single device */
5169         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5170                                     reset_list);
5171         amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5172
5173         /* block all schedulers and reset given job's ring */
5174         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5175
5176                 amdgpu_device_set_mp1_state(tmp_adev);
5177
5178                 /*
5179                  * Try to put the audio codec into suspend state
5180                  * before the gpu reset starts.
5181                  *
5182                  * The power domain of the graphics device is shared
5183                  * with the AZ (audio) power domain. Without this,
5184                  * we may change the audio hardware from behind
5185                  * the audio driver's back and trigger
5186                  * audio codec errors.
5187                  */
5188                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5189                         audio_suspended = true;
5190
5191                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5192
5193                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5194
5195                 if (!amdgpu_sriov_vf(tmp_adev))
5196                         amdgpu_amdkfd_pre_reset(tmp_adev);
5197
5198                 /*
5199                  * Mark these ASICs to be reset as untracked first,
5200                  * and add them back after the reset completes.
5201                  */
5202                 amdgpu_unregister_gpu_instance(tmp_adev);
5203
5204                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5205
5206                 /* disable ras on ALL IPs */
5207                 if (!need_emergency_restart &&
5208                       amdgpu_device_ip_need_full_reset(tmp_adev))
5209                         amdgpu_ras_suspend(tmp_adev);
5210
5211                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5212                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5213
5214                         if (!ring || !ring->sched.thread)
5215                                 continue;
5216
5217                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5218
5219                         if (need_emergency_restart)
5220                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5221                 }
5222                 atomic_inc(&tmp_adev->gpu_reset_counter);
5223         }
5224
5225         if (need_emergency_restart)
5226                 goto skip_sched_resume;
5227
5228         /*
5229          * Must check guilty signal here since after this point all old
5230          * HW fences are force signaled.
5231          *
5232          * job->base holds a reference to parent fence
5233          */
5234         if (job && dma_fence_is_signaled(&job->hw_fence)) {
5235                 job_signaled = true;
5236                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5237                 goto skip_hw_reset;
5238         }
5239
5240 retry:  /* Rest of adevs pre asic reset from XGMI hive. */
5241         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5242                 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5243                 /* TODO: Should we stop? */
5244                 if (r) {
5245                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5246                                   r, adev_to_drm(tmp_adev)->unique);
5247                         tmp_adev->asic_reset_res = r;
5248                 }
5249
5250                 /*
5251                  * Drop all pending non-scheduler resets. Scheduler resets
5252                  * were already dropped during drm_sched_stop
5253                  */
5254                 amdgpu_device_stop_pending_resets(tmp_adev);
5255         }
5256
5257         tmp_vram_lost_counter = atomic_read(&adev->vram_lost_counter);
5258         /* Actual ASIC resets if needed.*/
5259         /* Host driver will handle XGMI hive reset for SRIOV */
5260         if (amdgpu_sriov_vf(adev)) {
5261                 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5262                 if (r)
5263                         adev->asic_reset_res = r;
5264
5265                 /* Aldebaran supports RAS in SRIOV, so we need to resume RAS during reset */
5266                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
5267                         amdgpu_ras_resume(adev);
5268         } else {
5269                 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5270                 if (r == -EAGAIN)
5271                         goto retry;
5272         }
5273
5274 skip_hw_reset:
5275
5276         /* Post ASIC reset for all devs. */
5277         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5278
5279                 /*
5280                  * Sometimes a later bad compute job can block a good gfx job, as the gfx
5281                  * and compute rings share internal GC hardware. We add an additional
5282                  * guilty-job recheck step to find the real guilty job: it synchronously
5283                  * resubmits and waits for the first job to be signaled. If that wait
5284                  * times out, we identify the job as the real guilty one.
5285                  */
5286                 if (amdgpu_gpu_recovery == 2 &&
5287                         !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5288                         amdgpu_device_recheck_guilty_jobs(
5289                                 tmp_adev, device_list_handle, reset_context);
5290
5291                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5292                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5293
5294                         if (!ring || !ring->sched.thread)
5295                                 continue;
5296
5297                         /* No point in resubmitting jobs if we didn't HW reset */
5298                         if (!tmp_adev->asic_reset_res && !job_signaled)
5299                                 drm_sched_resubmit_jobs(&ring->sched);
5300
5301                         drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5302                 }
5303
5304                 if (adev->enable_mes)
5305                         amdgpu_mes_self_test(tmp_adev);
5306
5307                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5308                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5309                 }
5310
5311                 if (tmp_adev->asic_reset_res)
5312                         r = tmp_adev->asic_reset_res;
5313
5314                 tmp_adev->asic_reset_res = 0;
5315
5316                 if (r) {
5317                         /* bad news, how do we tell it to userspace? */
5318                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5319                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5320                 } else {
5321                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5322                         if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5323                                 DRM_WARN("smart shift update failed\n");
5324                 }
5325         }
5326
5327 skip_sched_resume:
5328         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5329                 /* unlock kfd: SRIOV would do it separately */
5330                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5331                         amdgpu_amdkfd_post_reset(tmp_adev);
5332
5333                 /* kfd_post_reset will do nothing if the kfd device is not initialized,
5334                  * so bring up kfd here if it was not initialized before
5335                  */
5336                 if (!adev->kfd.init_complete)
5337                         amdgpu_amdkfd_device_init(adev);
5338
5339                 if (audio_suspended)
5340                         amdgpu_device_resume_display_audio(tmp_adev);
5341
5342                 amdgpu_device_unset_mp1_state(tmp_adev);
5343         }
5344
5345         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5346                                             reset_list);
5347         amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5348
5349         if (hive) {
5350                 mutex_unlock(&hive->hive_lock);
5351                 amdgpu_put_xgmi_hive(hive);
5352         }
5353
5354         if (r)
5355                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5356
5357         atomic_set(&adev->reset_domain->reset_res, r);
5358         return r;
5359 }
5360
5361 /**
5362  * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5363  *
5364  * @adev: amdgpu_device pointer
5365  *
5366  * Fetches and stores in the driver the PCIE capabilities (gen speed
5367  * and lanes) of the slot the device is in. Handles APUs and
5368  * virtualized environments where PCIE config space may not be available.
5369  */
5370 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5371 {
5372         struct pci_dev *pdev;
5373         enum pci_bus_speed speed_cap, platform_speed_cap;
5374         enum pcie_link_width platform_link_width;
5375
5376         if (amdgpu_pcie_gen_cap)
5377                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5378
5379         if (amdgpu_pcie_lane_cap)
5380                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5381
5382         /* covers APUs as well */
5383         if (pci_is_root_bus(adev->pdev->bus)) {
5384                 if (adev->pm.pcie_gen_mask == 0)
5385                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5386                 if (adev->pm.pcie_mlw_mask == 0)
5387                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5388                 return;
5389         }
5390
5391         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5392                 return;
5393
5394         pcie_bandwidth_available(adev->pdev, NULL,
5395                                  &platform_speed_cap, &platform_link_width);
5396
5397         if (adev->pm.pcie_gen_mask == 0) {
5398                 /* asic caps */
5399                 pdev = adev->pdev;
5400                 speed_cap = pcie_get_speed_cap(pdev);
5401                 if (speed_cap == PCI_SPEED_UNKNOWN) {
5402                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5403                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5404                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5405                 } else {
5406                         if (speed_cap == PCIE_SPEED_32_0GT)
5407                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5408                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5409                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5410                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5411                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5412                         else if (speed_cap == PCIE_SPEED_16_0GT)
5413                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5414                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5415                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5416                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5417                         else if (speed_cap == PCIE_SPEED_8_0GT)
5418                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5419                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5420                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5421                         else if (speed_cap == PCIE_SPEED_5_0GT)
5422                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5423                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5424                         else
5425                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5426                 }
5427                 /* platform caps */
5428                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5429                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5430                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5431                 } else {
5432                         if (platform_speed_cap == PCIE_SPEED_32_0GT)
5433                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5434                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5435                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5436                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5437                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5438                         else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5439                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5440                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5441                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5442                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5443                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5444                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5445                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5446                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5447                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5448                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5449                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5450                         else
5451                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5452
5453                 }
5454         }
5455         if (adev->pm.pcie_mlw_mask == 0) {
5456                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5457                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5458                 } else {
5459                         switch (platform_link_width) {
5460                         case PCIE_LNK_X32:
5461                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5462                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5463                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5464                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5465                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5466                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5467                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5468                                 break;
5469                         case PCIE_LNK_X16:
5470                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5471                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5472                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5473                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5474                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5475                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5476                                 break;
5477                         case PCIE_LNK_X12:
5478                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5479                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5480                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5481                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5482                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5483                                 break;
5484                         case PCIE_LNK_X8:
5485                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5486                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5487                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5488                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5489                                 break;
5490                         case PCIE_LNK_X4:
5491                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5492                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5493                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5494                                 break;
5495                         case PCIE_LNK_X2:
5496                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5497                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5498                                 break;
5499                         case PCIE_LNK_X1:
5500                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5501                                 break;
5502                         default:
5503                                 break;
5504                         }
5505                 }
5506         }
5507 }
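
/*
 * Illustrative consumer (hypothetical; the real users live in the power
 * management code): the masks computed above can be tested to decide
 * which link speeds to allow, e.g.:
 *
 *	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
 *		... let the platform train the link up to 8.0 GT/s ...
 */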
5508
5509 /**
5510  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5511  *
5512  * @adev: amdgpu_device pointer
5513  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5514  *
5515  * Return true if @peer_adev can access (DMA) @adev through the PCIe
5516  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5517  * @peer_adev.
5518  */
5519 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5520                                       struct amdgpu_device *peer_adev)
5521 {
5522 #ifdef CONFIG_HSA_AMD_P2P
5523         uint64_t address_mask = peer_adev->dev->dma_mask ?
5524                 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5525         resource_size_t aper_limit =
5526                 adev->gmc.aper_base + adev->gmc.aper_size - 1;
5527         bool p2p_access = !adev->gmc.xgmi.connected_to_cpu &&
5528                           !(pci_p2pdma_distance_many(adev->pdev,
5529                                         &peer_adev->dev, 1, true) < 0);
5530
5531         return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5532                 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5533                 !(adev->gmc.aper_base & address_mask ||
5534                   aper_limit & address_mask));
5535 #else
5536         return false;
5537 #endif
5538 }
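
/*
 * Illustrative use (hypothetical call site): a peer-to-peer import path
 * would gate direct VRAM access on this check and otherwise fall back
 * to bouncing through system memory:
 *
 *	if (amdgpu_device_is_peer_accessible(exporter, importer))
 *		... map the exporter's VRAM BAR directly ...
 */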
5539
5540 int amdgpu_device_baco_enter(struct drm_device *dev)
5541 {
5542         struct amdgpu_device *adev = drm_to_adev(dev);
5543         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5544
5545         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5546                 return -ENOTSUPP;
5547
5548         if (ras && adev->ras_enabled &&
5549             adev->nbio.funcs->enable_doorbell_interrupt)
5550                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5551
5552         return amdgpu_dpm_baco_enter(adev);
5553 }
5554
5555 int amdgpu_device_baco_exit(struct drm_device *dev)
5556 {
5557         struct amdgpu_device *adev = drm_to_adev(dev);
5558         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5559         int ret = 0;
5560
5561         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5562                 return -ENOTSUPP;
5563
5564         ret = amdgpu_dpm_baco_exit(adev);
5565         if (ret)
5566                 return ret;
5567
5568         if (ras && adev->ras_enabled &&
5569             adev->nbio.funcs->enable_doorbell_interrupt)
5570                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5571
5572         if (amdgpu_passthrough(adev) &&
5573             adev->nbio.funcs->clear_doorbell_interrupt)
5574                 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5575
5576         return 0;
5577 }
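
/*
 * An illustrative sketch (compiled out): BACO ("Bus Active, Chip Off")
 * entry and exit are meant to be paired. A caller powering the GPU down
 * across an idle window might look like this; the surrounding error
 * handling is an assumption for the example, not the driver's actual
 * runtime-suspend path.
 */
#if 0
        int r = amdgpu_device_baco_enter(drm_dev);

        if (r)  /* e.g. -ENOTSUPP when the platform cannot do BACO */
                return r;
        /* ... GPU is powered down; on next use ... */
        r = amdgpu_device_baco_exit(drm_dev);
#endif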
5578
5579 /**
5580  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5581  * @pdev: PCI device struct
5582  * @state: PCI channel state
5583  *
5584  * Description: Called when a PCI error is detected.
5585  *
5586  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5587  */
5588 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5589 {
5590         struct drm_device *dev = pci_get_drvdata(pdev);
5591         struct amdgpu_device *adev = drm_to_adev(dev);
5592         int i;
5593
5594         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5595
5596         if (adev->gmc.xgmi.num_physical_nodes > 1) {
5597                 DRM_WARN("No support for XGMI hive yet...");
5598                 return PCI_ERS_RESULT_DISCONNECT;
5599         }
5600
5601         adev->pci_channel_state = state;
5602
5603         switch (state) {
5604         case pci_channel_io_normal:
5605                 return PCI_ERS_RESULT_CAN_RECOVER;
5606         /* Fatal error, prepare for slot reset */
5607         case pci_channel_io_frozen:
5608                 /*
5609                  * Locking adev->reset_domain->sem will prevent any external access
5610                  * to GPU during PCI error recovery
5611                  */
5612                 amdgpu_device_lock_reset_domain(adev->reset_domain);
5613                 amdgpu_device_set_mp1_state(adev);
5614
5615                 /*
5616                  * Block any work scheduling as we do for regular GPU reset
5617                  * for the duration of the recovery
5618                  */
5619                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5620                         struct amdgpu_ring *ring = adev->rings[i];
5621
5622                         if (!ring || !ring->sched.thread)
5623                                 continue;
5624
5625                         drm_sched_stop(&ring->sched, NULL);
5626                 }
5627                 atomic_inc(&adev->gpu_reset_counter);
5628                 return PCI_ERS_RESULT_NEED_RESET;
5629         case pci_channel_io_perm_failure:
5630                 /* Permanent error, prepare for device removal */
5631                 return PCI_ERS_RESULT_DISCONNECT;
5632         }
5633
5634         return PCI_ERS_RESULT_NEED_RESET;
5635 }
5636
5637 /**
5638  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5639  * @pdev: pointer to PCI device
5640  */
5641 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5642 {
5644         DRM_INFO("PCI error: mmio enabled callback!!\n");
5645
5646         /* TODO - dump whatever for debugging purposes */
5647
5648         /* This is called only if amdgpu_pci_error_detected returns
5649          * PCI_ERS_RESULT_CAN_RECOVER. Reads/writes to the device still
5650          * work, so there is no need to reset the slot.
5651          */
5652
5653         return PCI_ERS_RESULT_RECOVERED;
5654 }
5655
5656 /**
5657  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5658  * @pdev: PCI device struct
5659  *
5660  * Description: This routine is called by the pci error recovery
5661  * code after the PCI slot has been reset, just before we
5662  * should resume normal operations.
5663  */
5664 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5665 {
5666         struct drm_device *dev = pci_get_drvdata(pdev);
5667         struct amdgpu_device *adev = drm_to_adev(dev);
5668         int r, i;
5669         struct amdgpu_reset_context reset_context;
5670         u32 memsize;
5671         struct list_head device_list;
5672
5673         DRM_INFO("PCI error: slot reset callback!!\n");
5674
5675         memset(&reset_context, 0, sizeof(reset_context));
5676
5677         INIT_LIST_HEAD(&device_list);
5678         list_add_tail(&adev->reset_list, &device_list);
5679
5680         /* wait for asic to come out of reset */
5681         msleep(500);
5682
5683         /* Restore PCI config space */
5684         amdgpu_device_load_pci_state(pdev);
5685
5686         /* confirm the ASIC came out of reset */
5687         for (i = 0; i < adev->usec_timeout; i++) {
5688                 memsize = amdgpu_asic_get_config_memsize(adev);
5689
5690                 if (memsize != 0xffffffff)
5691                         break;
5692                 udelay(1);
5693         }
5694         if (memsize == 0xffffffff) {
5695                 r = -ETIME;
5696                 goto out;
5697         }
5698
5699         reset_context.method = AMD_RESET_METHOD_NONE;
5700         reset_context.reset_req_dev = adev;
5701         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5702         set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5703
5704         adev->no_hw_access = true;
5705         r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5706         adev->no_hw_access = false;
5707         if (r)
5708                 goto out;
5709
5710         r = amdgpu_do_asic_reset(&device_list, &reset_context);
5711
5712 out:
5713         if (!r) {
5714                 if (amdgpu_device_cache_pci_state(adev->pdev))
5715                         pci_restore_state(adev->pdev);
5716
5717                 DRM_INFO("PCIe error recovery succeeded\n");
5718         } else {
5719                 DRM_ERROR("PCIe error recovery failed, err:%d\n", r);
5720                 amdgpu_device_unset_mp1_state(adev);
5721                 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5722         }
5723
5724         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5725 }
5726
5727 /**
5728  * amdgpu_pci_resume() - resume normal ops after PCI reset
5729  * @pdev: pointer to PCI device
5730  *
5731  * Called when the error recovery driver tells us that it's
5732  * OK to resume normal operation.
5733  */
5734 void amdgpu_pci_resume(struct pci_dev *pdev)
5735 {
5736         struct drm_device *dev = pci_get_drvdata(pdev);
5737         struct amdgpu_device *adev = drm_to_adev(dev);
5738         int i;
5739
5741         DRM_INFO("PCI error: resume callback!!\n");
5742
5743         /* Only continue execution for the case of pci_channel_io_frozen */
5744         if (adev->pci_channel_state != pci_channel_io_frozen)
5745                 return;
5746
5747         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5748                 struct amdgpu_ring *ring = adev->rings[i];
5749
5750                 if (!ring || !ring->sched.thread)
5751                         continue;
5752
5754                 drm_sched_resubmit_jobs(&ring->sched);
5755                 drm_sched_start(&ring->sched, true);
5756         }
5757
5758         amdgpu_device_unset_mp1_state(adev);
5759         amdgpu_device_unlock_reset_domain(adev->reset_domain);
5760 }
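
/*
 * The four callbacks above implement the PCIe AER recovery flow:
 * error_detected -> (mmio_enabled | slot_reset) -> resume. They are hooked
 * up through the PCI core's pci_error_handlers table, roughly as below
 * (compare the table in amdgpu_drv.c); shown here only as a sketch.
 */
#if 0
static const struct pci_error_handlers amdgpu_pci_err_handler = {
        .error_detected = amdgpu_pci_error_detected,
        .mmio_enabled   = amdgpu_pci_mmio_enabled,
        .slot_reset     = amdgpu_pci_slot_reset,
        .resume         = amdgpu_pci_resume,
};
#endif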
5761
5762 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5763 {
5764         struct drm_device *dev = pci_get_drvdata(pdev);
5765         struct amdgpu_device *adev = drm_to_adev(dev);
5766         int r;
5767
5768         r = pci_save_state(pdev);
5769         if (!r) {
5770                 kfree(adev->pci_state);
5771
5772                 adev->pci_state = pci_store_saved_state(pdev);
5773
5774                 if (!adev->pci_state) {
5775                         DRM_ERROR("Failed to store PCI saved state\n");
5776                         return false;
5777                 }
5778         } else {
5779                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5780                 return false;
5781         }
5782
5783         return true;
5784 }
5785
5786 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5787 {
5788         struct drm_device *dev = pci_get_drvdata(pdev);
5789         struct amdgpu_device *adev = drm_to_adev(dev);
5790         int r;
5791
5792         if (!adev->pci_state)
5793                 return false;
5794
5795         r = pci_load_saved_state(pdev, adev->pci_state);
5796
5797         if (!r) {
5798                 pci_restore_state(pdev);
5799         } else {
5800                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5801                 return false;
5802         }
5803
5804         return true;
5805 }
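
/*
 * An illustrative sketch (compiled out): cache and load are used as a pair
 * around a reset, so that config space clobbered by the reset can be put
 * back. The comment in the middle stands in for whichever reset path is
 * taken.
 */
#if 0
        if (amdgpu_device_cache_pci_state(adev->pdev)) {
                /* ... ASIC reset clobbers PCI config space ... */
                amdgpu_device_load_pci_state(adev->pdev);
        }
#endif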
5806
5807 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5808                 struct amdgpu_ring *ring)
5809 {
5810 #ifdef CONFIG_X86_64
5811         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5812                 return;
5813 #endif
5814         if (adev->gmc.xgmi.connected_to_cpu)
5815                 return;
5816
5817         if (ring && ring->funcs->emit_hdp_flush)
5818                 amdgpu_ring_emit_hdp_flush(ring);
5819         else
5820                 amdgpu_asic_flush_hdp(adev, ring);
5821 }
5822
5823 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5824                 struct amdgpu_ring *ring)
5825 {
5826 #ifdef CONFIG_X86_64
5827         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5828                 return;
5829 #endif
5830         if (adev->gmc.xgmi.connected_to_cpu)
5831                 return;
5832
5833         amdgpu_asic_invalidate_hdp(adev, ring);
5834 }
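
/*
 * An illustrative sketch (compiled out): the HDP (Host Data Path) block
 * caches CPU accesses to VRAM through the BAR. Flush after the CPU writes
 * data the GPU is about to read; invalidate after the GPU writes data the
 * CPU is about to read. APUs and CPU-coherent XGMI parts need neither, as
 * the early returns above show. vram_cpu_addr is a hypothetical ioremapped
 * VRAM address used only for the example.
 */
#if 0
        /* CPU produced data in visible VRAM, GPU will read it */
        writel(value, vram_cpu_addr);
        amdgpu_device_flush_hdp(adev, ring);

        /* GPU produced data in VRAM, CPU will read it back */
        amdgpu_device_invalidate_hdp(adev, ring);
        value = readl(vram_cpu_addr);
#endif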
5835
5836 int amdgpu_in_reset(struct amdgpu_device *adev)
5837 {
5838         return atomic_read(&adev->reset_domain->in_gpu_reset);
5839 }
5840
5841 /**
5842  * amdgpu_device_halt() - bring hardware to some kind of halt state
5843  *
5844  * @adev: amdgpu_device pointer
5845  *
5846  * Bring hardware to some kind of halt state so that no one can touch it
5847  * any more. It helps to preserve the error context when an error occurs.
5848  * Compared to a simple hang, the system stays stable, at least for SSH
5849  * access. Then it should be trivial to inspect the hardware state and
5850  * see what's going on. Implemented as follows:
5851  *
5852  * 1. drm_dev_unplug() makes device inaccessible to user space(IOCTLs, etc),
5853  *    clears all CPU mappings to device, disallows remappings through page faults
5854  * 2. amdgpu_irq_disable_all() disables all interrupts
5855  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5856  * 4. set adev->no_hw_access to avoid potential crashes after step 5
5857  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5858  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5859  *    flush any in flight DMA operations
5860  */
5861 void amdgpu_device_halt(struct amdgpu_device *adev)
5862 {
5863         struct pci_dev *pdev = adev->pdev;
5864         struct drm_device *ddev = adev_to_drm(adev);
5865
5866         drm_dev_unplug(ddev);
5867
5868         amdgpu_irq_disable_all(adev);
5869
5870         amdgpu_fence_driver_hw_fini(adev);
5871
5872         adev->no_hw_access = true;
5873
5874         amdgpu_device_unmap_mmio(adev);
5875
5876         pci_disable_device(pdev);
5877         pci_wait_for_pending_transaction(pdev);
5878 }
5879
5880 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5881                                 u32 reg)
5882 {
5883         unsigned long flags, address, data;
5884         u32 r;
5885
5886         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5887         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5888
5889         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5890         WREG32(address, reg * 4);
5891         (void)RREG32(address);
5892         r = RREG32(data);
5893         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5894         return r;
5895 }
5896
5897 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
5898                                 u32 reg, u32 v)
5899 {
5900         unsigned long flags, address, data;
5901
5902         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5903         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5904
5905         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5906         WREG32(address, reg * 4);
5907         (void)RREG32(address);
5908         WREG32(data, v);
5909         (void)RREG32(data);
5910         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5911 }
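
/*
 * An illustrative sketch (compiled out): the two helpers above implement a
 * classic index/data pair - the dword offset is written to the index
 * register (as a byte address, reg * 4) and the payload moves through the
 * data register, all under pcie_idx_lock. A read-modify-write would look
 * like this; SOME_PCIE_PORT_REG, SOME_MASK and SOME_BITS are hypothetical
 * names.
 */
#if 0
        u32 v = amdgpu_device_pcie_port_rreg(adev, SOME_PCIE_PORT_REG);

        v &= ~SOME_MASK;
        v |= SOME_BITS;
        amdgpu_device_pcie_port_wreg(adev, SOME_PCIE_PORT_REG, v);
#endif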