/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
#include <linux/pci-p2pdma.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");

#define AMDGPU_RESUME_MS                2000
#define AMDGPU_MAX_RETRY_LIMIT          2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)

const char *amdgpu_asic_name[] = {
        "TAHITI",
        "PITCAIRN",
        "VERDE",
        "OLAND",
        "HAINAN",
        "BONAIRE",
        "KAVERI",
        "KABINI",
        "HAWAII",
        "MULLINS",
        "TOPAZ",
        "TONGA",
        "FIJI",
        "CARRIZO",
        "STONEY",
        "POLARIS10",
        "POLARIS11",
        "POLARIS12",
        "VEGAM",
        "VEGA10",
        "VEGA12",
        "VEGA20",
        "RAVEN",
        "ARCTURUS",
        "RENOIR",
        "ALDEBARAN",
        "NAVI10",
        "CYAN_SKILLFISH",
        "NAVI14",
        "NAVI12",
        "SIENNA_CICHLID",
        "NAVY_FLOUNDER",
        "VANGOGH",
        "DIMGREY_CAVEFISH",
        "BEIGE_GOBY",
        "YELLOW_CARP",
        "IP DISCOVERY",
        "LAST",
};

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
 */

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

        return sysfs_emit(buf, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
                amdgpu_device_get_pcie_replay_count, NULL);
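
/*
 * Illustrative note (not part of the driver): userspace reads this
 * attribute like any other sysfs file, e.g. from a shell:
 *
 *   cat /sys/bus/pci/devices/0000:03:00.0/pcie_replay_count
 *
 * The PCI address above is a hypothetical example; the file lives in the
 * device's sysfs directory.
 */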

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * DOC: product_name
 *
 * The amdgpu driver provides a sysfs API for reporting the product name
 * for the device.
 * The file product_name is used for this and returns the product name
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_name(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%s\n", adev->product_name);
}

static DEVICE_ATTR(product_name, S_IRUGO,
                amdgpu_device_get_product_name, NULL);

/**
 * DOC: product_number
 *
 * The amdgpu driver provides a sysfs API for reporting the part number
 * for the device.
 * The file product_number is used for this and returns the part number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_number(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%s\n", adev->product_number);
}

static DEVICE_ATTR(product_number, S_IRUGO,
                amdgpu_device_get_product_number, NULL);

/**
 * DOC: serial_number
 *
 * The amdgpu driver provides a sysfs API for reporting the serial number
 * for the device.
 * The file serial_number is used for this and returns the serial number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_serial_number(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%s\n", adev->serial);
}

static DEVICE_ATTR(serial_number, S_IRUGO,
                amdgpu_device_get_serial_number, NULL);

/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
                return true;
        return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        if (adev->has_pr3 ||
            ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
                return true;
        return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise return false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        return amdgpu_asic_supports_baco(adev);
}

/**
 * amdgpu_device_supports_smart_shift - Is the device dGPU with
 * smart shift support
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
        return (amdgpu_device_supports_boco(dev) &&
                amdgpu_acpi_is_power_shift_control_supported());
}
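
/*
 * Illustrative sketch (not part of the driver): the helpers above are
 * typically consulted together when picking a runtime power-off strategy,
 * roughly in this order of preference:
 *
 *   if (amdgpu_device_supports_px(dev))         (ATPX dGPU power control)
 *           ...
 *   else if (amdgpu_device_supports_boco(dev))  (ACPI power resources)
 *           ...
 *   else if (amdgpu_device_supports_baco(dev))  (Bus Active, Chip Off)
 *           ...
 *
 * The bodies are placeholders; the actual selection logic lives in the
 * runtime-pm paths elsewhere in the driver.
 */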

/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
                             void *buf, size_t size, bool write)
{
        unsigned long flags;
        uint32_t hi = ~0, tmp = 0;
        uint32_t *data = buf;
        uint64_t last;
        int idx;

        if (!drm_dev_enter(adev_to_drm(adev), &idx))
                return;

        BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

        spin_lock_irqsave(&adev->mmio_idx_lock, flags);
        for (last = pos + size; pos < last; pos += 4) {
                tmp = pos >> 31;

                WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
                if (tmp != hi) {
                        WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
                        hi = tmp;
                }
                if (write)
                        WREG32_NO_KIQ(mmMM_DATA, *data++);
                else
                        *data++ = RREG32_NO_KIQ(mmMM_DATA);
        }

        spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
        drm_dev_exit(idx);
}

/**
 * amdgpu_device_aper_access - access vram by the vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 *
 * Returns the number of bytes that have been transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
                                 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
        void __iomem *addr;
        size_t count = 0;
        uint64_t last;

        if (!adev->mman.aper_base_kaddr)
                return 0;

        last = min(pos + size, adev->gmc.visible_vram_size);
        if (last > pos) {
                addr = adev->mman.aper_base_kaddr + pos;
                count = last - pos;

                if (write) {
                        memcpy_toio(addr, buf, count);
                        mb();
                        amdgpu_device_flush_hdp(adev, NULL);
                } else {
                        amdgpu_device_invalidate_hdp(adev, NULL);
                        mb();
                        memcpy_fromio(buf, addr, count);
                }
        }

        return count;
#else
        return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
                               void *buf, size_t size, bool write)
{
        size_t count;

        /* try using the VRAM aperture to access VRAM first */
        count = amdgpu_device_aper_access(adev, pos, buf, size, write);
        size -= count;
        if (size) {
                /* use MM access for the rest of VRAM */
                pos += count;
                buf += count;
                amdgpu_device_mm_access(adev, pos, buf, size, write);
        }
}
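
/*
 * Usage sketch (illustrative): reading a single dword from a VRAM offset
 * through the helper above. The offset and size must be 4-byte aligned
 * for the MM_INDEX/MM_DATA fallback path:
 *
 *   uint32_t val;
 *
 *   amdgpu_device_vram_access(adev, 0x1000, &val, sizeof(val), false);
 *
 * The 0x1000 offset is an arbitrary example value.
 */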

/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
        if (adev->no_hw_access)
                return true;

#ifdef CONFIG_LOCKDEP
        /*
         * This is a bit complicated to understand, so it is worth a comment.
         * What we assert here is that the GPU reset is not running on another
         * thread in parallel.
         *
         * For this we trylock the read side of the reset semaphore; if that
         * succeeds we know that the reset is not running in parallel.
         *
         * If the trylock fails we assert that we are either already holding
         * the read side of the lock or are the reset thread itself and hold
         * the write side of the lock.
         */
        if (in_task()) {
                if (down_read_trylock(&adev->reset_domain->sem))
                        up_read(&adev->reset_domain->sem);
                else
                        lockdep_assert_held(&adev->reset_domain->sem);
        }
#endif
        return false;
}

/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
                            uint32_t reg, uint32_t acc_flags)
{
        uint32_t ret;

        if (amdgpu_device_skip_hw_access(adev))
                return 0;

        if ((reg * 4) < adev->rmmio_size) {
                if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
                    amdgpu_sriov_runtime(adev) &&
                    down_read_trylock(&adev->reset_domain->sem)) {
                        ret = amdgpu_kiq_rreg(adev, reg);
                        up_read(&adev->reset_domain->sem);
                } else {
                        ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
                }
        } else {
                ret = adev->pcie_rreg(adev, reg * 4);
        }

        trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

        return ret;
}

/*
 * MMIO register read with bytes helper functions
 * @offset: bytes offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
        if (amdgpu_device_skip_hw_access(adev))
                return 0;

        if (offset < adev->rmmio_size)
                return (readb(adev->rmmio + offset));
        BUG();
}

/*
 * MMIO register write with bytes helper functions
 * @offset: bytes offset from MMIO start
 * @value: the value to be written to the register
 */
/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if (offset < adev->rmmio_size)
                writeb(value, adev->rmmio + offset);
        else
                BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
                        uint32_t reg, uint32_t v,
                        uint32_t acc_flags)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if ((reg * 4) < adev->rmmio_size) {
                if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
                    amdgpu_sriov_runtime(adev) &&
                    down_read_trylock(&adev->reset_domain->sem)) {
                        amdgpu_kiq_wreg(adev, reg, v);
                        up_read(&adev->reset_domain->sem);
                } else {
                        writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
                }
        } else {
                adev->pcie_wreg(adev, reg * 4, v);
        }

        trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}

/**
 * amdgpu_mm_wreg_mmio_rlc - write a register either with direct/indirect mmio or with the RLC path if in range
 *
 * @adev: amdgpu_device pointer
 * @reg: mmio/rlc register
 * @v: value to write
 *
 * This function is invoked only for debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
                             uint32_t reg, uint32_t v)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if (amdgpu_sriov_fullaccess(adev) &&
            adev->gfx.rlc.funcs &&
            adev->gfx.rlc.funcs->is_rlcg_access_range) {
                if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
                        return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
        } else if ((reg * 4) >= adev->rmmio_size) {
                adev->pcie_wreg(adev, reg * 4, v);
        } else {
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        }
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
        if (amdgpu_device_skip_hw_access(adev))
                return 0;

        if (index < adev->doorbell.num_doorbells) {
                return readl(adev->doorbell.ptr + index);
        } else {
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
                return 0;
        }
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if (index < adev->doorbell.num_doorbells) {
                writel(v, adev->doorbell.ptr + index);
        } else {
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
        }
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
        if (amdgpu_device_skip_hw_access(adev))
                return 0;

        if (index < adev->doorbell.num_doorbells) {
                return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
        } else {
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
                return 0;
        }
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if (index < adev->doorbell.num_doorbells) {
                atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
        } else {
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
        }
}
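
/*
 * Usage sketch (illustrative): a ring implementation typically stores its
 * doorbell index and rings the doorbell when its write pointer advances,
 * e.g. via the WDOORBELL64() wrapper macro:
 *
 *   WDOORBELL64(ring->doorbell_index, ring->wptr);
 *
 * which on bare metal ends up in amdgpu_mm_wdoorbell64() above.
 */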

/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
                                u32 pcie_index, u32 pcie_data,
                                u32 reg_addr)
{
        unsigned long flags;
        u32 r;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        r = readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
                                  u32 pcie_index, u32 pcie_data,
                                  u32 reg_addr)
{
        unsigned long flags;
        u64 r;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        /* read low 32 bits */
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        r = readl(pcie_data_offset);
        /* read high 32 bits */
        writel(reg_addr + 4, pcie_index_offset);
        readl(pcie_index_offset);
        r |= ((u64)readl(pcie_data_offset) << 32);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
}

/**
 * amdgpu_device_indirect_wreg - write an indirect register address
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
                                 u32 pcie_index, u32 pcie_data,
                                 u32 reg_addr, u32 reg_data)
{
        unsigned long flags;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        writel(reg_data, pcie_data_offset);
        readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
                                   u32 pcie_index, u32 pcie_data,
                                   u32 reg_addr, u64 reg_data)
{
        unsigned long flags;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        /* write low 32 bits */
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
        readl(pcie_data_offset);
        /* write high 32 bits */
        writel(reg_addr + 4, pcie_index_offset);
        readl(pcie_index_offset);
        writel((u32)(reg_data >> 32), pcie_data_offset);
        readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
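
/*
 * Illustrative note: the four helpers above implement the classic
 * index/data pair protocol. A SoC-specific wrapper usually supplies the
 * mmio offsets of the index and data registers, e.g. (hypothetical
 * offsets):
 *
 *   static u32 soc_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *   {
 *           return amdgpu_device_indirect_rreg(adev, 0x38, 0x3c, reg);
 *   }
 *
 * where 0x38/0x3c stand in for an mmPCIE_INDEX2/mmPCIE_DATA2-style pair.
 */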

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
        BUG();
        return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
                  reg, v);
        BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
        BUG();
        return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
        DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
                  reg, v);
        BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
                                          uint32_t block, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
                  reg, block);
        BUG();
        return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
                                      uint32_t block,
                                      uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
                  reg, block, v);
        BUG();
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
        amdgpu_asic_pre_asic_init(adev);

        if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
                return amdgpu_atomfirmware_asic_init(adev, true);
        else
                return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
        return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
                                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
                                       &adev->vram_scratch.robj,
                                       &adev->vram_scratch.gpu_addr,
                                       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
                                             const u32 *registers,
                                             const u32 array_size)
{
        u32 tmp, reg, and_mask, or_mask;
        int i;

        if (array_size % 3)
                return;

        for (i = 0; i < array_size; i += 3) {
                reg = registers[i + 0];
                and_mask = registers[i + 1];
                or_mask = registers[i + 2];

                if (and_mask == 0xffffffff) {
                        tmp = or_mask;
                } else {
                        tmp = RREG32(reg);
                        tmp &= ~and_mask;
                        if (adev->family >= AMDGPU_FAMILY_AI)
                                tmp |= (or_mask & and_mask);
                        else
                                tmp |= or_mask;
                }
                WREG32(reg, tmp);
        }
}
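
/*
 * Illustrative example (hypothetical register offsets and masks): golden
 * register lists are flat arrays of {offset, and_mask, or_mask} triplets,
 * so array_size must be a multiple of 3:
 *
 *   static const u32 example_golden_settings[] = {
 *           0x1234, 0xffffffff, 0x00000001,  (full overwrite)
 *           0x5678, 0x0000ff00, 0x00002a00,  (read-modify-write)
 *   };
 *
 *   amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *                                           ARRAY_SIZE(example_golden_settings));
 */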

/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
        pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
        return pci_reset_function(adev->pdev);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
        /* No doorbell on SI hardware generation */
        if (adev->asic_type < CHIP_BONAIRE) {
                adev->doorbell.base = 0;
                adev->doorbell.size = 0;
                adev->doorbell.num_doorbells = 0;
                adev->doorbell.ptr = NULL;
                return 0;
        }

        if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
                return -EINVAL;

        amdgpu_asic_init_doorbell_index(adev);

        /* doorbell bar mapping */
        adev->doorbell.base = pci_resource_start(adev->pdev, 2);
        adev->doorbell.size = pci_resource_len(adev->pdev, 2);

        if (adev->enable_mes) {
                adev->doorbell.num_doorbells =
                        adev->doorbell.size / sizeof(u32);
        } else {
                adev->doorbell.num_doorbells =
                        min_t(u32, adev->doorbell.size / sizeof(u32),
                              adev->doorbell_index.max_assignment+1);
                if (adev->doorbell.num_doorbells == 0)
                        return -EINVAL;

                /* For Vega, reserve and map two pages on the doorbell BAR since
                 * the SDMA paging queue doorbell uses the second page. The
                 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
                 * doorbells are in the first page, so with the paging queue
                 * enabled the max num_doorbells should be increased by one
                 * page (0x400 in dwords).
                 */
                if (adev->asic_type >= CHIP_VEGA10)
                        adev->doorbell.num_doorbells += 0x400;
        }

        adev->doorbell.ptr = ioremap(adev->doorbell.base,
                                     adev->doorbell.num_doorbells *
                                     sizeof(u32));
        if (adev->doorbell.ptr == NULL)
                return -ENOMEM;

        return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
        iounmap(adev->doorbell.ptr);
        adev->doorbell.ptr = NULL;
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
        if (adev->wb.wb_obj) {
                amdgpu_bo_free_kernel(&adev->wb.wb_obj,
                                      &adev->wb.gpu_addr,
                                      (void **)&adev->wb.wb);
                adev->wb.wb_obj = NULL;
        }
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->wb.wb_obj == NULL) {
                /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
                r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
                                            PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                            &adev->wb.wb_obj, &adev->wb.gpu_addr,
                                            (void **)&adev->wb.wb);
                if (r) {
                        dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
                        return r;
                }

                adev->wb.num_wb = AMDGPU_MAX_WB;
                memset(&adev->wb.used, 0, sizeof(adev->wb.used));

                /* clear wb memory */
                memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
        }

        return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
        unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

        if (offset < adev->wb.num_wb) {
                __set_bit(offset, adev->wb.used);
                *wb = offset << 3; /* convert to dw offset */
                return 0;
        } else {
                return -EINVAL;
        }
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
        wb >>= 3;
        if (wb < adev->wb.num_wb)
                __clear_bit(wb, adev->wb.used);
}
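
/*
 * Usage sketch (illustrative): a consumer allocates a slot, derives the
 * CPU and GPU addresses from the returned dword offset, and frees the
 * slot when done:
 *
 *   u32 wb;
 *
 *   if (!amdgpu_device_wb_get(adev, &wb)) {
 *           volatile u32 *cpu_addr = &adev->wb.wb[wb];
 *           u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *           ...
 *           amdgpu_device_wb_free(adev, wb);
 *   }
 */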

/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
        int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
        struct pci_bus *root;
        struct resource *res;
        unsigned i;
        u16 cmd;
        int r;

        /* Bypass for VF */
        if (amdgpu_sriov_vf(adev))
                return 0;

        /* skip if the bios has already enabled large BAR */
        if (adev->gmc.real_vram_size &&
            (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
                return 0;

        /* Check if the root BUS has 64bit memory resources */
        root = adev->pdev->bus;
        while (root->parent)
                root = root->parent;

        pci_bus_for_each_resource(root, res, i) {
                if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
                    res->start > 0x100000000ull)
                        break;
        }

        /* Trying to resize is pointless without a root hub window above 4GB */
        if (!res)
                return 0;

        /* Limit the BAR size to what is available */
        rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
                        rbar_size);

        /* Disable memory decoding while we change the BAR addresses and size */
        pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
        pci_write_config_word(adev->pdev, PCI_COMMAND,
                              cmd & ~PCI_COMMAND_MEMORY);

        /* Free the VRAM and doorbell BAR, we most likely need to move both. */
        amdgpu_device_doorbell_fini(adev);
        if (adev->asic_type >= CHIP_BONAIRE)
                pci_release_resource(adev->pdev, 2);

        pci_release_resource(adev->pdev, 0);

        r = pci_resize_resource(adev->pdev, 0, rbar_size);
        if (r == -ENOSPC)
                DRM_INFO("Not enough PCI address space for a large BAR.");
        else if (r && r != -ENOTSUPP)
                DRM_ERROR("Problem resizing BAR0 (%d).", r);

        pci_assign_unassigned_bus_resources(adev->pdev->bus);

        /* When the doorbell or fb BAR isn't available we have no chance of
         * using the device.
         */
        r = amdgpu_device_doorbell_init(adev);
        if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
                return -ENODEV;

        pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

        return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if post is needed because a hw reset was performed.
 * Returns true if post is needed or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
        uint32_t reg;

        if (amdgpu_sriov_vf(adev))
                return false;

        if (amdgpu_passthrough(adev)) {
                /* For FIJI: in the whole-GPU pass-through virtualization case,
                 * after a VM reboot some old SMC firmware still needs the driver
                 * to do a vPost, otherwise the GPU hangs. SMC firmware versions
                 * 22.15 and above don't have this flaw, so we force a vPost for
                 * SMC versions below 22.15.
                 */
                if (adev->asic_type == CHIP_FIJI) {
                        int err;
                        uint32_t fw_ver;

                        err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
                        /* force vPost if an error occurred */
                        if (err)
                                return true;

                        fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
                        if (fw_ver < 0x00160e00)
                                return true;
                }
        }

        /* Don't post if we need to reset whole hive on init */
        if (adev->gmc.xgmi.pending_reset)
                return false;

        if (adev->has_hw_reset) {
                adev->has_hw_reset = false;
                return true;
        }

        /* bios scratch used on CIK+ */
        if (adev->asic_type >= CHIP_BONAIRE)
                return amdgpu_atombios_scratch_need_asic_init(adev);

        /* check MEM_SIZE for older asics */
        reg = amdgpu_asic_get_config_memsize(adev);

        if ((reg != 0) && (reg != 0xffffffff))
                return false;

        return true;
}

/**
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 *
 * @adev: amdgpu_device pointer
 *
 * Confirm whether the module parameter and pcie bridge agree that ASPM should
 * be set for this device.
 *
 * Returns true if it should be used or false if not.
 */
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
{
        switch (amdgpu_aspm) {
        case -1:
                break;
        case 0:
                return false;
        case 1:
                return true;
        default:
                return false;
        }
        return pcie_aspm_enabled(adev->pdev);
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
                bool state)
{
        struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

        amdgpu_asic_set_vga_state(adev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines number of bits in page table versus page directory,
 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 * page table and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
        /* defines number of bits in page table versus page directory,
         * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
         * page table and the remaining bits are in the page directory */
        if (amdgpu_vm_block_size == -1)
                return;

        if (amdgpu_vm_block_size < 9) {
                dev_warn(adev->dev, "VM page table size (%d) too small\n",
                         amdgpu_vm_block_size);
                amdgpu_vm_block_size = -1;
        }
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
        /* no need to check the default value */
        if (amdgpu_vm_size == -1)
                return;

        if (amdgpu_vm_size < 1) {
                dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
                         amdgpu_vm_size);
                amdgpu_vm_size = -1;
        }
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
        struct sysinfo si;
        bool is_os_64 = (sizeof(void *) == 8);
        uint64_t total_memory;
        uint64_t dram_size_seven_GB = 0x1B8000000;
        uint64_t dram_size_three_GB = 0xB8000000;

        if (amdgpu_smu_memory_pool_size == 0)
                return;

        if (!is_os_64) {
                DRM_WARN("Not 64-bit OS, feature not supported\n");
                goto def_value;
        }
        si_meminfo(&si);
        total_memory = (uint64_t)si.totalram * si.mem_unit;

        if ((amdgpu_smu_memory_pool_size == 1) ||
            (amdgpu_smu_memory_pool_size == 2)) {
                if (total_memory < dram_size_three_GB)
                        goto def_value1;
        } else if ((amdgpu_smu_memory_pool_size == 4) ||
                   (amdgpu_smu_memory_pool_size == 8)) {
                if (total_memory < dram_size_seven_GB)
                        goto def_value1;
        } else {
                DRM_WARN("Smu memory pool size not supported\n");
                goto def_value;
        }
        adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

        return;

def_value1:
        DRM_WARN("Not enough system memory\n");
def_value:
        adev->pm.smu_prv_buffer_size = 0;
}

static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
{
        if (!(adev->flags & AMD_IS_APU) ||
            adev->asic_type < CHIP_RAVEN)
                return 0;

        switch (adev->asic_type) {
        case CHIP_RAVEN:
                if (adev->pdev->device == 0x15dd)
                        adev->apu_flags |= AMD_APU_IS_RAVEN;
                if (adev->pdev->device == 0x15d8)
                        adev->apu_flags |= AMD_APU_IS_PICASSO;
                break;
        case CHIP_RENOIR:
                if ((adev->pdev->device == 0x1636) ||
                    (adev->pdev->device == 0x164c))
                        adev->apu_flags |= AMD_APU_IS_RENOIR;
                else
                        adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
                break;
        case CHIP_VANGOGH:
                adev->apu_flags |= AMD_APU_IS_VANGOGH;
                break;
        case CHIP_YELLOW_CARP:
                break;
        case CHIP_CYAN_SKILLFISH:
                if ((adev->pdev->device == 0x13FE) ||
                    (adev->pdev->device == 0x143F))
                        adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
                break;
        default:
                break;
        }

        return 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
        if (amdgpu_sched_jobs < 4) {
                dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = 4;
        } else if (!is_power_of_2(amdgpu_sched_jobs)) {
                dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
        }

        if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
                /* gart size must be greater or equal to 32M */
                dev_warn(adev->dev, "gart size (%d) too small\n",
                         amdgpu_gart_size);
                amdgpu_gart_size = -1;
        }

        if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
                /* gtt size must be greater or equal to 32M */
                dev_warn(adev->dev, "gtt size (%d) too small\n",
                         amdgpu_gtt_size);
                amdgpu_gtt_size = -1;
        }

        /* valid range is between 4 and 9 inclusive */
        if (amdgpu_vm_fragment_size != -1 &&
            (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
                dev_warn(adev->dev, "valid range is between 4 and 9\n");
                amdgpu_vm_fragment_size = -1;
        }

        if (amdgpu_sched_hw_submission < 2) {
                dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
                         amdgpu_sched_hw_submission);
                amdgpu_sched_hw_submission = 2;
        } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
                dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
                         amdgpu_sched_hw_submission);
                amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
        }

        if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
                dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
                amdgpu_reset_method = -1;
        }

        amdgpu_device_check_smu_prv_buffer_size(adev);

        amdgpu_device_check_vm_size(adev);

        amdgpu_device_check_block_size(adev);

        adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

        return 0;
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver.  Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
                                        enum vga_switcheroo_state state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        int r;

        if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
                return;

        if (state == VGA_SWITCHEROO_ON) {
                pr_info("switched on\n");
                /* don't suspend or resume card normally */
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

                pci_set_power_state(pdev, PCI_D0);
                amdgpu_device_load_pci_state(pdev);
                r = pci_enable_device(pdev);
                if (r)
                        DRM_WARN("pci_enable_device failed (%d)\n", r);
                amdgpu_device_resume(dev, true);

                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
                pr_info("switched off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                amdgpu_device_suspend(dev, true);
                amdgpu_device_cache_pci_state(pdev);
                /* Shut down the device */
                pci_disable_device(pdev);
                pci_set_power_state(pdev, PCI_D3cold);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
}
1607
1608 /**
1609  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1610  *
1611  * @pdev: pci dev pointer
1612  *
1613  * Callback for the switcheroo driver.  Checks if the switcheroo
1614  * state can be changed.
1615  * Returns true if the state can be changed, false if not.
1616  */
1617 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1618 {
1619         struct drm_device *dev = pci_get_drvdata(pdev);
1620
1621         /*
1622          * FIXME: open_count is protected by drm_global_mutex but that would lead to
1623          * locking inversion with the driver load path. And the access here is
1624          * completely racy anyway. So don't bother with locking for now.
1625          */
1626         return atomic_read(&dev->open_count) == 0;
1627 }
1628
1629 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1630         .set_gpu_state = amdgpu_switcheroo_set_state,
1631         .reprobe = NULL,
1632         .can_switch = amdgpu_switcheroo_can_switch,
1633 };
1634
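/*
 * Illustrative sketch, not an upstream code path: the client ops above are
 * handed to vga_switcheroo during device init.  The third argument selects
 * runtime power control and depends on PX support; the wrapper name here
 * is made up.
 */
static void __maybe_unused example_register_switcheroo(struct amdgpu_device *adev,
                                                       bool px)
{
        vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, px);
}
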
1635 /**
1636  * amdgpu_device_ip_set_clockgating_state - set the CG state
1637  *
1638  * @dev: amdgpu_device pointer
1639  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1640  * @state: clockgating state (gate or ungate)
1641  *
1642  * Sets the requested clockgating state for all instances of
1643  * the hardware IP specified.
1644  * Returns the error code from the last instance.
1645  */
1646 int amdgpu_device_ip_set_clockgating_state(void *dev,
1647                                            enum amd_ip_block_type block_type,
1648                                            enum amd_clockgating_state state)
1649 {
1650         struct amdgpu_device *adev = dev;
1651         int i, r = 0;
1652
1653         for (i = 0; i < adev->num_ip_blocks; i++) {
1654                 if (!adev->ip_blocks[i].status.valid)
1655                         continue;
1656                 if (adev->ip_blocks[i].version->type != block_type)
1657                         continue;
1658                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1659                         continue;
1660                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1661                         (void *)adev, state);
1662                 if (r)
1663                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1664                                   adev->ip_blocks[i].version->funcs->name, r);
1665         }
1666         return r;
1667 }
1668
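/*
 * Illustrative sketch, not an upstream code path: gate the clocks of every
 * GFX IP instance via the helper above, as the power-management code does.
 * The wrapper name is made up.
 */
static int __maybe_unused example_gate_gfx_clocks(struct amdgpu_device *adev)
{
        return amdgpu_device_ip_set_clockgating_state(adev,
                                                      AMD_IP_BLOCK_TYPE_GFX,
                                                      AMD_CG_STATE_GATE);
}
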
1669 /**
1670  * amdgpu_device_ip_set_powergating_state - set the PG state
1671  *
1672  * @dev: amdgpu_device pointer
1673  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1674  * @state: powergating state (gate or ungate)
1675  *
1676  * Sets the requested powergating state for all instances of
1677  * the hardware IP specified.
1678  * Returns the error code from the last instance.
1679  */
1680 int amdgpu_device_ip_set_powergating_state(void *dev,
1681                                            enum amd_ip_block_type block_type,
1682                                            enum amd_powergating_state state)
1683 {
1684         struct amdgpu_device *adev = dev;
1685         int i, r = 0;
1686
1687         for (i = 0; i < adev->num_ip_blocks; i++) {
1688                 if (!adev->ip_blocks[i].status.valid)
1689                         continue;
1690                 if (adev->ip_blocks[i].version->type != block_type)
1691                         continue;
1692                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1693                         continue;
1694                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1695                         (void *)adev, state);
1696                 if (r)
1697                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1698                                   adev->ip_blocks[i].version->funcs->name, r);
1699         }
1700         return r;
1701 }
1702
1703 /**
1704  * amdgpu_device_ip_get_clockgating_state - get the CG state
1705  *
1706  * @adev: amdgpu_device pointer
1707  * @flags: clockgating feature flags
1708  *
1709  * Walks the list of IPs on the device and updates the clockgating
1710  * flags for each IP.
1711  * Updates @flags with the feature flags for each hardware IP where
1712  * clockgating is enabled.
1713  */
1714 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1715                                             u64 *flags)
1716 {
1717         int i;
1718
1719         for (i = 0; i < adev->num_ip_blocks; i++) {
1720                 if (!adev->ip_blocks[i].status.valid)
1721                         continue;
1722                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1723                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1724         }
1725 }
1726
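/*
 * Illustrative sketch, not an upstream code path: collect the active
 * clockgating feature flags and test one bit, much like the debugfs code
 * does when reporting CG status.  The wrapper name is made up.
 */
static bool __maybe_unused example_gfx_mgcg_active(struct amdgpu_device *adev)
{
        u64 flags = 0;

        amdgpu_device_ip_get_clockgating_state(adev, &flags);
        return !!(flags & AMD_CG_SUPPORT_GFX_MGCG);
}
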
1727 /**
1728  * amdgpu_device_ip_wait_for_idle - wait for idle
1729  *
1730  * @adev: amdgpu_device pointer
1731  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1732  *
1733  * Waits for the requested hardware IP to be idle.
1734  * Returns 0 for success or a negative error code on failure.
1735  */
1736 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1737                                    enum amd_ip_block_type block_type)
1738 {
1739         int i, r;
1740
1741         for (i = 0; i < adev->num_ip_blocks; i++) {
1742                 if (!adev->ip_blocks[i].status.valid)
1743                         continue;
1744                 if (adev->ip_blocks[i].version->type == block_type) {
1745                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1746                         if (r)
1747                                 return r;
1748                         break;
1749                 }
1750         }
1751         return 0;
1752
1753 }
1754
1755 /**
1756  * amdgpu_device_ip_is_idle - is the hardware IP idle
1757  *
1758  * @adev: amdgpu_device pointer
1759  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1760  *
1761  * Check if the hardware IP is idle or not.
1762  * Returns true if the IP is idle, false if not.
1763  */
1764 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1765                               enum amd_ip_block_type block_type)
1766 {
1767         int i;
1768
1769         for (i = 0; i < adev->num_ip_blocks; i++) {
1770                 if (!adev->ip_blocks[i].status.valid)
1771                         continue;
1772                 if (adev->ip_blocks[i].version->type == block_type)
1773                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1774         }
1775         return true;
1777 }
1778
1779 /**
1780  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1781  *
1782  * @adev: amdgpu_device pointer
1783  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1784  *
1785  * Returns a pointer to the hardware IP block structure
1786  * if it exists for the asic, otherwise NULL.
1787  */
1788 struct amdgpu_ip_block *
1789 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1790                               enum amd_ip_block_type type)
1791 {
1792         int i;
1793
1794         for (i = 0; i < adev->num_ip_blocks; i++)
1795                 if (adev->ip_blocks[i].version->type == type)
1796                         return &adev->ip_blocks[i];
1797
1798         return NULL;
1799 }
1800
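/*
 * Illustrative sketch, not an upstream code path: look up the GFX IP block
 * and read its major version, e.g. to branch on the hardware generation.
 * The wrapper name is made up.
 */
static int __maybe_unused example_gfx_major_version(struct amdgpu_device *adev)
{
        struct amdgpu_ip_block *ip_block =
                amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);

        return ip_block ? ip_block->version->major : -ENODEV;
}
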
1801 /**
1802  * amdgpu_device_ip_block_version_cmp
1803  *
1804  * @adev: amdgpu_device pointer
1805  * @type: enum amd_ip_block_type
1806  * @major: major version
1807  * @minor: minor version
1808  *
1809  * return 0 if equal or greater
1810  * return 1 if smaller or the ip_block doesn't exist
1811  */
1812 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1813                                        enum amd_ip_block_type type,
1814                                        u32 major, u32 minor)
1815 {
1816         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1817
1818         if (ip_block && ((ip_block->version->major > major) ||
1819                         ((ip_block->version->major == major) &&
1820                         (ip_block->version->minor >= minor))))
1821                 return 0;
1822
1823         return 1;
1824 }
1825
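/*
 * Illustrative sketch, not an upstream code path: the compare above returns
 * 0 when the installed block is at least the requested version, so callers
 * can gate features on it.  The SMC 7.27 threshold below is made up.
 */
static bool __maybe_unused example_smc_is_new_enough(struct amdgpu_device *adev)
{
        return amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC,
                                                  7, 27) == 0;
}
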
1826 /**
1827  * amdgpu_device_ip_block_add
1828  *
1829  * @adev: amdgpu_device pointer
1830  * @ip_block_version: pointer to the IP to add
1831  *
1832  * Adds the IP block driver information to the collection of IPs
1833  * on the asic.
1834  */
1835 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1836                                const struct amdgpu_ip_block_version *ip_block_version)
1837 {
1838         if (!ip_block_version)
1839                 return -EINVAL;
1840
1841         switch (ip_block_version->type) {
1842         case AMD_IP_BLOCK_TYPE_VCN:
1843                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1844                         return 0;
1845                 break;
1846         case AMD_IP_BLOCK_TYPE_JPEG:
1847                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1848                         return 0;
1849                 break;
1850         default:
1851                 break;
1852         }
1853
1854         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1855                   ip_block_version->funcs->name);
1856
1857         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1858
1859         return 0;
1860 }
1861
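/*
 * Illustrative sketch, not an upstream code path: the per-ASIC setup code
 * (vi.c, soc15.c, etc.) populates the IP list by calling the helper above
 * once per block, in initialization order.  vi_common_ip_block is a real
 * block exported by vi.h; the wrapper name is made up.
 */
static int __maybe_unused example_add_common_block(struct amdgpu_device *adev)
{
        return amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
}
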
1862 /**
1863  * amdgpu_device_enable_virtual_display - enable virtual display feature
1864  *
1865  * @adev: amdgpu_device pointer
1866  *
1867  * Enables the virtual display feature if the user has enabled it via
1868  * the module parameter virtual_display.  This feature provides virtual
1869  * display hardware on headless boards or in virtualized environments.
1870  * This function parses and validates the configuration string specified by
1871  * the user and configures the virtual display configuration (number of
1872  * virtual connectors, crtcs, etc.) specified.
1873  */
1874 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1875 {
1876         adev->enable_virtual_display = false;
1877
1878         if (amdgpu_virtual_display) {
1879                 const char *pci_address_name = pci_name(adev->pdev);
1880                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1881
1882                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1883                 pciaddstr_tmp = pciaddstr;
1884                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1885                         pciaddname = strsep(&pciaddname_tmp, ",");
1886                         if (!strcmp("all", pciaddname)
1887                             || !strcmp(pci_address_name, pciaddname)) {
1888                                 long num_crtc;
1889                                 int res = -1;
1890
1891                                 adev->enable_virtual_display = true;
1892
1893                                 if (pciaddname_tmp)
1894                                         res = kstrtol(pciaddname_tmp, 10,
1895                                                       &num_crtc);
1896
1897                                 if (!res) {
1898                                         if (num_crtc < 1)
1899                                                 num_crtc = 1;
1900                                         if (num_crtc > 6)
1901                                                 num_crtc = 6;
1902                                         adev->mode_info.num_crtc = num_crtc;
1903                                 } else {
1904                                         adev->mode_info.num_crtc = 1;
1905                                 }
1906                                 break;
1907                         }
1908                 }
1909
1910                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1911                          amdgpu_virtual_display, pci_address_name,
1912                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1913
1914                 kfree(pciaddstr);
1915         }
1916 }
1917
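/*
 * Example virtual_display strings accepted by the parser above (the PCI
 * address is made up).  Entries are separated by ';' and each one is
 * "<pci address>[,num_crtc]", with num_crtc clamped to the 1..6 range:
 *
 *   virtual_display=0000:26:00.0,2   two virtual crtcs on one device
 *   virtual_display=all,1            one virtual crtc on every device
 */
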
1918 /**
1919  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1920  *
1921  * @adev: amdgpu_device pointer
1922  *
1923  * Parses the asic configuration parameters specified in the gpu info
1924  * firmware and makes them available to the driver for use in configuring
1925  * the asic.
1926  * Returns 0 on success, -EINVAL on failure.
1927  */
1928 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1929 {
1930         const char *chip_name;
1931         char fw_name[40];
1932         int err;
1933         const struct gpu_info_firmware_header_v1_0 *hdr;
1934
1935         adev->firmware.gpu_info_fw = NULL;
1936
1937         if (adev->mman.discovery_bin) {
1938                 /*
1939                  * FIXME: The bounding box is still needed by Navi12, so
1940                  * temporarily read it from gpu_info firmware. Should be dropped
1941                  * when DAL no longer needs it.
1942                  */
1943                 if (adev->asic_type != CHIP_NAVI12)
1944                         return 0;
1945         }
1946
1947         switch (adev->asic_type) {
1948         default:
1949                 return 0;
1950         case CHIP_VEGA10:
1951                 chip_name = "vega10";
1952                 break;
1953         case CHIP_VEGA12:
1954                 chip_name = "vega12";
1955                 break;
1956         case CHIP_RAVEN:
1957                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1958                         chip_name = "raven2";
1959                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1960                         chip_name = "picasso";
1961                 else
1962                         chip_name = "raven";
1963                 break;
1964         case CHIP_ARCTURUS:
1965                 chip_name = "arcturus";
1966                 break;
1967         case CHIP_NAVI12:
1968                 chip_name = "navi12";
1969                 break;
1970         }
1971
1972         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1973         err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1974         if (err) {
1975                 dev_err(adev->dev,
1976                         "Failed to load gpu_info firmware \"%s\"\n",
1977                         fw_name);
1978                 goto out;
1979         }
1980         err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1981         if (err) {
1982                 dev_err(adev->dev,
1983                         "Failed to validate gpu_info firmware \"%s\"\n",
1984                         fw_name);
1985                 goto out;
1986         }
1987
1988         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1989         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1990
1991         switch (hdr->version_major) {
1992         case 1:
1993         {
1994                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1995                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1996                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1997
1998                 /*
1999                  * Should be dropped when DAL no longer needs it.
2000                  */
2001                 if (adev->asic_type == CHIP_NAVI12)
2002                         goto parse_soc_bounding_box;
2003
2004                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2005                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2006                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2007                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2008                 adev->gfx.config.max_texture_channel_caches =
2009                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
2010                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2011                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2012                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2013                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2014                 adev->gfx.config.double_offchip_lds_buf =
2015                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2016                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2017                 adev->gfx.cu_info.max_waves_per_simd =
2018                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2019                 adev->gfx.cu_info.max_scratch_slots_per_cu =
2020                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2021                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2022                 if (hdr->version_minor >= 1) {
2023                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2024                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2025                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2026                         adev->gfx.config.num_sc_per_sh =
2027                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2028                         adev->gfx.config.num_packer_per_sc =
2029                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2030                 }
2031
2032 parse_soc_bounding_box:
2033                 /*
2034                  * soc bounding box info is not integrated into the discovery table,
2035                  * so we always need to parse it from the gpu info firmware when needed.
2036                  */
2037                 if (hdr->version_minor == 2) {
2038                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2039                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2040                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2041                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2042                 }
2043                 break;
2044         }
2045         default:
2046                 dev_err(adev->dev,
2047                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2048                 err = -EINVAL;
2049                 goto out;
2050         }
2051 out:
2052         return err;
2053 }
2054
2055 /**
2056  * amdgpu_device_ip_early_init - run early init for hardware IPs
2057  *
2058  * @adev: amdgpu_device pointer
2059  *
2060  * Early initialization pass for hardware IPs.  The hardware IPs that make
2061  * up each asic are discovered and each IP's early_init callback is run.  This
2062  * is the first stage in initializing the asic.
2063  * Returns 0 on success, negative error code on failure.
2064  */
2065 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2066 {
2067         struct drm_device *dev = adev_to_drm(adev);
2068         struct pci_dev *parent;
2069         int i, r;
2070
2071         amdgpu_device_enable_virtual_display(adev);
2072
2073         if (amdgpu_sriov_vf(adev)) {
2074                 r = amdgpu_virt_request_full_gpu(adev, true);
2075                 if (r)
2076                         return r;
2077         }
2078
2079         switch (adev->asic_type) {
2080 #ifdef CONFIG_DRM_AMDGPU_SI
2081         case CHIP_VERDE:
2082         case CHIP_TAHITI:
2083         case CHIP_PITCAIRN:
2084         case CHIP_OLAND:
2085         case CHIP_HAINAN:
2086                 adev->family = AMDGPU_FAMILY_SI;
2087                 r = si_set_ip_blocks(adev);
2088                 if (r)
2089                         return r;
2090                 break;
2091 #endif
2092 #ifdef CONFIG_DRM_AMDGPU_CIK
2093         case CHIP_BONAIRE:
2094         case CHIP_HAWAII:
2095         case CHIP_KAVERI:
2096         case CHIP_KABINI:
2097         case CHIP_MULLINS:
2098                 if (adev->flags & AMD_IS_APU)
2099                         adev->family = AMDGPU_FAMILY_KV;
2100                 else
2101                         adev->family = AMDGPU_FAMILY_CI;
2102
2103                 r = cik_set_ip_blocks(adev);
2104                 if (r)
2105                         return r;
2106                 break;
2107 #endif
2108         case CHIP_TOPAZ:
2109         case CHIP_TONGA:
2110         case CHIP_FIJI:
2111         case CHIP_POLARIS10:
2112         case CHIP_POLARIS11:
2113         case CHIP_POLARIS12:
2114         case CHIP_VEGAM:
2115         case CHIP_CARRIZO:
2116         case CHIP_STONEY:
2117                 if (adev->flags & AMD_IS_APU)
2118                         adev->family = AMDGPU_FAMILY_CZ;
2119                 else
2120                         adev->family = AMDGPU_FAMILY_VI;
2121
2122                 r = vi_set_ip_blocks(adev);
2123                 if (r)
2124                         return r;
2125                 break;
2126         default:
2127                 r = amdgpu_discovery_set_ip_blocks(adev);
2128                 if (r)
2129                         return r;
2130                 break;
2131         }
2132
2133         if (amdgpu_has_atpx() &&
2134             (amdgpu_is_atpx_hybrid() ||
2135              amdgpu_has_atpx_dgpu_power_cntl()) &&
2136             ((adev->flags & AMD_IS_APU) == 0) &&
2137             !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2138                 adev->flags |= AMD_IS_PX;
2139
2140         if (!(adev->flags & AMD_IS_APU)) {
2141                 parent = pci_upstream_bridge(adev->pdev);
2142                 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2143         }
2144
2145         amdgpu_amdkfd_device_probe(adev);
2146
2147         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2148         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2149                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2150         if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2151                 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2152
2153         for (i = 0; i < adev->num_ip_blocks; i++) {
2154                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2155                         DRM_ERROR("disabled ip block: %d <%s>\n",
2156                                   i, adev->ip_blocks[i].version->funcs->name);
2157                         adev->ip_blocks[i].status.valid = false;
2158                 } else {
2159                         if (adev->ip_blocks[i].version->funcs->early_init) {
2160                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2161                                 if (r == -ENOENT) {
2162                                         adev->ip_blocks[i].status.valid = false;
2163                                 } else if (r) {
2164                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2165                                                   adev->ip_blocks[i].version->funcs->name, r);
2166                                         return r;
2167                                 } else {
2168                                         adev->ip_blocks[i].status.valid = true;
2169                                 }
2170                         } else {
2171                                 adev->ip_blocks[i].status.valid = true;
2172                         }
2173                 }
2174                 /* get the vbios after the asic_funcs are set up */
2175                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2176                         r = amdgpu_device_parse_gpu_info_fw(adev);
2177                         if (r)
2178                                 return r;
2179
2180                         /* Read BIOS */
2181                         if (!amdgpu_get_bios(adev))
2182                                 return -EINVAL;
2183
2184                         r = amdgpu_atombios_init(adev);
2185                         if (r) {
2186                                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2187                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2188                                 return r;
2189                         }
2190
2191                         /* get pf2vf msg info at its earliest time */
2192                         if (amdgpu_sriov_vf(adev))
2193                                 amdgpu_virt_init_data_exchange(adev);
2194
2195                 }
2196         }
2197
2198         adev->cg_flags &= amdgpu_cg_mask;
2199         adev->pg_flags &= amdgpu_pg_mask;
2200
2201         return 0;
2202 }
2203
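/*
 * Note on the ip_block_mask handling in amdgpu_device_ip_early_init()
 * above: bit i of the amdgpu_ip_block_mask module parameter keeps IP
 * block i enabled, so e.g. ip_block_mask=0xfffffffd would disable block 1
 * (a made-up example; block indices depend on the ASIC's IP discovery
 * order).
 */
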
2204 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2205 {
2206         int i, r;
2207
2208         for (i = 0; i < adev->num_ip_blocks; i++) {
2209                 if (!adev->ip_blocks[i].status.sw)
2210                         continue;
2211                 if (adev->ip_blocks[i].status.hw)
2212                         continue;
2213                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2214                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2215                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2216                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2217                         if (r) {
2218                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2219                                           adev->ip_blocks[i].version->funcs->name, r);
2220                                 return r;
2221                         }
2222                         adev->ip_blocks[i].status.hw = true;
2223                 }
2224         }
2225
2226         return 0;
2227 }
2228
2229 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2230 {
2231         int i, r;
2232
2233         for (i = 0; i < adev->num_ip_blocks; i++) {
2234                 if (!adev->ip_blocks[i].status.sw)
2235                         continue;
2236                 if (adev->ip_blocks[i].status.hw)
2237                         continue;
2238                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2239                 if (r) {
2240                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2241                                   adev->ip_blocks[i].version->funcs->name, r);
2242                         return r;
2243                 }
2244                 adev->ip_blocks[i].status.hw = true;
2245         }
2246
2247         return 0;
2248 }
2249
2250 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2251 {
2252         int r = 0;
2253         int i;
2254         uint32_t smu_version;
2255
2256         if (adev->asic_type >= CHIP_VEGA10) {
2257                 for (i = 0; i < adev->num_ip_blocks; i++) {
2258                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2259                                 continue;
2260
2261                         if (!adev->ip_blocks[i].status.sw)
2262                                 continue;
2263
2264                         /* no need to do the fw loading again if already done */
2265                         if (adev->ip_blocks[i].status.hw)
2266                                 break;
2267
2268                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2269                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2270                                 if (r) {
2271                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2272                                                           adev->ip_blocks[i].version->funcs->name, r);
2273                                         return r;
2274                                 }
2275                         } else {
2276                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2277                                 if (r) {
2278                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2279                                                           adev->ip_blocks[i].version->funcs->name, r);
2280                                         return r;
2281                                 }
2282                         }
2283
2284                         adev->ip_blocks[i].status.hw = true;
2285                         break;
2286                 }
2287         }
2288
2289         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2290                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2291
2292         return r;
2293 }
2294
2295 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2296 {
2297         long timeout;
2298         int r, i;
2299
2300         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2301                 struct amdgpu_ring *ring = adev->rings[i];
2302
2303                 /* No need to set up the GPU scheduler for rings that don't need it */
2304                 if (!ring || ring->no_scheduler)
2305                         continue;
2306
2307                 switch (ring->funcs->type) {
2308                 case AMDGPU_RING_TYPE_GFX:
2309                         timeout = adev->gfx_timeout;
2310                         break;
2311                 case AMDGPU_RING_TYPE_COMPUTE:
2312                         timeout = adev->compute_timeout;
2313                         break;
2314                 case AMDGPU_RING_TYPE_SDMA:
2315                         timeout = adev->sdma_timeout;
2316                         break;
2317                 default:
2318                         timeout = adev->video_timeout;
2319                         break;
2320                 }
2321
2322                 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2323                                    ring->num_hw_submission, amdgpu_job_hang_limit,
2324                                    timeout, adev->reset_domain->wq,
2325                                    ring->sched_score, ring->name,
2326                                    adev->dev);
2327                 if (r) {
2328                         DRM_ERROR("Failed to create scheduler on ring %s.\n",
2329                                   ring->name);
2330                         return r;
2331                 }
2332         }
2333
2334         return 0;
2335 }
2336
2337
2338 /**
2339  * amdgpu_device_ip_init - run init for hardware IPs
2340  *
2341  * @adev: amdgpu_device pointer
2342  *
2343  * Main initialization pass for hardware IPs.  The list of all the hardware
2344  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2345  * are run.  sw_init initializes the software state associated with each IP
2346  * and hw_init initializes the hardware associated with each IP.
2347  * Returns 0 on success, negative error code on failure.
2348  */
2349 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2350 {
2351         int i, r;
2352
2353         r = amdgpu_ras_init(adev);
2354         if (r)
2355                 return r;
2356
2357         for (i = 0; i < adev->num_ip_blocks; i++) {
2358                 if (!adev->ip_blocks[i].status.valid)
2359                         continue;
2360                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2361                 if (r) {
2362                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2363                                   adev->ip_blocks[i].version->funcs->name, r);
2364                         goto init_failed;
2365                 }
2366                 adev->ip_blocks[i].status.sw = true;
2367
2368                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2369                         /* need to do common hw init early so everything is set up for gmc */
2370                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2371                         if (r) {
2372                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2373                                 goto init_failed;
2374                         }
2375                         adev->ip_blocks[i].status.hw = true;
2376                 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2377                         /* need to do gmc hw init early so we can allocate gpu mem */
2378                         /* Try to reserve bad pages early */
2379                         if (amdgpu_sriov_vf(adev))
2380                                 amdgpu_virt_exchange_data(adev);
2381
2382                         r = amdgpu_device_vram_scratch_init(adev);
2383                         if (r) {
2384                                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2385                                 goto init_failed;
2386                         }
2387                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2388                         if (r) {
2389                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2390                                 goto init_failed;
2391                         }
2392                         r = amdgpu_device_wb_init(adev);
2393                         if (r) {
2394                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2395                                 goto init_failed;
2396                         }
2397                         adev->ip_blocks[i].status.hw = true;
2398
2399                         /* right after GMC hw init, we create CSA */
2400                         if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2401                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2402                                                                 AMDGPU_GEM_DOMAIN_VRAM,
2403                                                                 AMDGPU_CSA_SIZE);
2404                                 if (r) {
2405                                         DRM_ERROR("allocate CSA failed %d\n", r);
2406                                         goto init_failed;
2407                                 }
2408                         }
2409                 }
2410         }
2411
2412         if (amdgpu_sriov_vf(adev))
2413                 amdgpu_virt_init_data_exchange(adev);
2414
2415         r = amdgpu_ib_pool_init(adev);
2416         if (r) {
2417                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2418                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2419                 goto init_failed;
2420         }
2421
2422         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2423         if (r)
2424                 goto init_failed;
2425
2426         r = amdgpu_device_ip_hw_init_phase1(adev);
2427         if (r)
2428                 goto init_failed;
2429
2430         r = amdgpu_device_fw_loading(adev);
2431         if (r)
2432                 goto init_failed;
2433
2434         r = amdgpu_device_ip_hw_init_phase2(adev);
2435         if (r)
2436                 goto init_failed;
2437
2438         /*
2439          * Retired pages will be loaded from eeprom and reserved here;
2440          * this should be called after amdgpu_device_ip_hw_init_phase2 since
2441          * for some ASICs the RAS EEPROM code relies on the SMU being fully
2442          * functional for I2C communication, which is only true at this point.
2443          *
2444          * amdgpu_ras_recovery_init may fail, but the upper level only cares
2445          * about failures caused by a bad gpu and stops the amdgpu init
2446          * process accordingly.  For other failures, it still releases all
2447          * the resources and prints an error message, rather than returning a
2448          * negative value to the upper level.
2449          *
2450          * Note: theoretically, this should be called before all vram allocations
2451          * to protect retired pages from abuse.
2452          */
2453         r = amdgpu_ras_recovery_init(adev);
2454         if (r)
2455                 goto init_failed;
2456
2457         /*
2458          * In case of XGMI, grab an extra reset domain reference for this device
2459          */
2460         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2461                 if (amdgpu_xgmi_add_device(adev) == 0) {
2462                         if (!amdgpu_sriov_vf(adev)) {
2463                                 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2464
2465                                 if (!hive->reset_domain ||
2466                                     !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2467                                         r = -ENOENT;
2468                                         amdgpu_put_xgmi_hive(hive);
2469                                         goto init_failed;
2470                                 }
2471
2472                                 /* Drop the early temporary reset domain we created for device */
2473                                 amdgpu_reset_put_reset_domain(adev->reset_domain);
2474                                 adev->reset_domain = hive->reset_domain;
2475                                 amdgpu_put_xgmi_hive(hive);
2476                         }
2477                 }
2478         }
2479
2480         r = amdgpu_device_init_schedulers(adev);
2481         if (r)
2482                 goto init_failed;
2483
2484         /* Don't init kfd if the whole hive needs to be reset during init */
2485         if (!adev->gmc.xgmi.pending_reset)
2486                 amdgpu_amdkfd_device_init(adev);
2487
2488         amdgpu_fru_get_product_info(adev);
2489
2490 init_failed:
2491         if (amdgpu_sriov_vf(adev))
2492                 amdgpu_virt_release_full_gpu(adev, true);
2493
2494         return r;
2495 }
2496
2497 /**
2498  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2499  *
2500  * @adev: amdgpu_device pointer
2501  *
2502  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2503  * this function before a GPU reset.  If the value is retained after a
2504  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2505  */
2506 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2507 {
2508         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2509 }
2510
2511 /**
2512  * amdgpu_device_check_vram_lost - check if vram is valid
2513  *
2514  * @adev: amdgpu_device pointer
2515  *
2516  * Checks the reset magic value written to the gart pointer in VRAM.
2517  * The driver calls this after a GPU reset to see if the contents of
2518  * VRAM are lost or not.
2519  * Returns true if vram is lost, false if not.
2520  */
2521 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2522 {
2523         if (memcmp(adev->gart.ptr, adev->reset_magic,
2524                         AMDGPU_RESET_MAGIC_NUM))
2525                 return true;
2526
2527         if (!amdgpu_in_reset(adev))
2528                 return false;
2529
2530         /*
2531          * For all ASICs with baco/mode1 reset, the VRAM is
2532          * always assumed to be lost.
2533          */
2534         switch (amdgpu_asic_reset_method(adev)) {
2535         case AMD_RESET_METHOD_BACO:
2536         case AMD_RESET_METHOD_MODE1:
2537                 return true;
2538         default:
2539                 return false;
2540         }
2541 }
2542
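/*
 * Illustrative sketch of how the two helpers above pair up around a GPU
 * reset (simplified; the real sequencing lives in the reset code):
 *
 *      amdgpu_device_fill_reset_magic(adev);
 *      ...perform the ASIC reset...
 *      if (amdgpu_device_check_vram_lost(adev))
 *              ...re-upload firmware and restore buffer contents...
 */
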
2543 /**
2544  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2545  *
2546  * @adev: amdgpu_device pointer
2547  * @state: clockgating state (gate or ungate)
2548  *
2549  * The list of all the hardware IPs that make up the asic is walked and the
2550  * set_clockgating_state callbacks are run.
2551  * On late init, this pass enables clockgating for the hardware IPs;
2552  * on fini or suspend, it disables clockgating for the hardware IPs.
2553  * Returns 0 on success, negative error code on failure.
2554  */
2555
2556 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2557                                enum amd_clockgating_state state)
2558 {
2559         int i, j, r;
2560
2561         if (amdgpu_emu_mode == 1)
2562                 return 0;
2563
2564         for (j = 0; j < adev->num_ip_blocks; j++) {
2565                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2566                 if (!adev->ip_blocks[i].status.late_initialized)
2567                         continue;
2568                 /* skip CG for GFX on S0ix */
2569                 if (adev->in_s0ix &&
2570                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2571                         continue;
2572                 /* skip CG for VCE/UVD, it's handled specially */
2573                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2574                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2575                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2576                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2577                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2578                         /* enable clockgating to save power */
2579                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2580                                                                                      state);
2581                         if (r) {
2582                                 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
2583                                           adev->ip_blocks[i].version->funcs->name, r);
2584                                 return r;
2585                         }
2586                 }
2587         }
2588
2589         return 0;
2590 }
2591
2592 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2593                                enum amd_powergating_state state)
2594 {
2595         int i, j, r;
2596
2597         if (amdgpu_emu_mode == 1)
2598                 return 0;
2599
2600         for (j = 0; j < adev->num_ip_blocks; j++) {
2601                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2602                 if (!adev->ip_blocks[i].status.late_initialized)
2603                         continue;
2604                 /* skip PG for GFX on S0ix */
2605                 if (adev->in_s0ix &&
2606                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2607                         continue;
2608                 /* skip PG for VCE/UVD, it's handled specially */
2609                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2610                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2611                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2612                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2613                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2614                         /* enable powergating to save power */
2615                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2616                                                                                         state);
2617                         if (r) {
2618                                 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
2619                                           adev->ip_blocks[i].version->funcs->name, r);
2620                                 return r;
2621                         }
2622                 }
2623         }
2624         return 0;
2625 }
2626
2627 static int amdgpu_device_enable_mgpu_fan_boost(void)
2628 {
2629         struct amdgpu_gpu_instance *gpu_ins;
2630         struct amdgpu_device *adev;
2631         int i, ret = 0;
2632
2633         mutex_lock(&mgpu_info.mutex);
2634
2635         /*
2636          * MGPU fan boost feature should be enabled
2637          * only when there are two or more dGPUs in
2638          * the system
2639          */
2640         if (mgpu_info.num_dgpu < 2)
2641                 goto out;
2642
2643         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2644                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2645                 adev = gpu_ins->adev;
2646                 if (!(adev->flags & AMD_IS_APU) &&
2647                     !gpu_ins->mgpu_fan_enabled) {
2648                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2649                         if (ret)
2650                                 break;
2651
2652                         gpu_ins->mgpu_fan_enabled = 1;
2653                 }
2654         }
2655
2656 out:
2657         mutex_unlock(&mgpu_info.mutex);
2658
2659         return ret;
2660 }
2661
2662 /**
2663  * amdgpu_device_ip_late_init - run late init for hardware IPs
2664  *
2665  * @adev: amdgpu_device pointer
2666  *
2667  * Late initialization pass for hardware IPs.  The list of all the hardware
2668  * IPs that make up the asic is walked and the late_init callbacks are run.
2669  * late_init covers any special initialization that an IP requires
2670  * after all of the have been initialized or something that needs to happen
2671  * after all of them have been initialized or something that needs to happen
2672  * Returns 0 on success, negative error code on failure.
2673  */
2674 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2675 {
2676         struct amdgpu_gpu_instance *gpu_instance;
2677         int i = 0, r;
2678
2679         for (i = 0; i < adev->num_ip_blocks; i++) {
2680                 if (!adev->ip_blocks[i].status.hw)
2681                         continue;
2682                 if (adev->ip_blocks[i].version->funcs->late_init) {
2683                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2684                         if (r) {
2685                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2686                                           adev->ip_blocks[i].version->funcs->name, r);
2687                                 return r;
2688                         }
2689                 }
2690                 adev->ip_blocks[i].status.late_initialized = true;
2691         }
2692
2693         r = amdgpu_ras_late_init(adev);
2694         if (r) {
2695                 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2696                 return r;
2697         }
2698
2699         amdgpu_ras_set_error_query_ready(adev, true);
2700
2701         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2702         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2703
2704         amdgpu_device_fill_reset_magic(adev);
2705
2706         r = amdgpu_device_enable_mgpu_fan_boost();
2707         if (r)
2708                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2709
2710         /* For passthrough configurations on arcturus and aldebaran, enable special SBR handling */
2711         if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2712                                adev->asic_type == CHIP_ALDEBARAN))
2713                 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2714
2715         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2716                 mutex_lock(&mgpu_info.mutex);
2717
2718                 /*
2719                  * Reset the device p-state to low, as it was booted with high.
2720                  *
2721                  * This should be performed only after all devices from the same
2722                  * hive get initialized.
2723                  *
2724                  * However, the number of devices in the hive is not known in
2725                  * advance; it is counted one by one as the devices initialize.
2726                  *
2727                  * So, we wait until all XGMI interlinked devices are initialized.
2728                  * This may bring some delays as those devices may come from
2729                  * different hives. But that should be OK.
2730                  */
2731                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2732                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2733                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2734                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2735                                         continue;
2736
2737                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2738                                                 AMDGPU_XGMI_PSTATE_MIN);
2739                                 if (r) {
2740                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2741                                         break;
2742                                 }
2743                         }
2744                 }
2745
2746                 mutex_unlock(&mgpu_info.mutex);
2747         }
2748
2749         return 0;
2750 }
2751
2752 /**
2753  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2754  *
2755  * @adev: amdgpu_device pointer
2756  *
2757  * For ASICs that need to disable the SMC first
2758  */
2759 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2760 {
2761         int i, r;
2762
2763         if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2764                 return;
2765
2766         for (i = 0; i < adev->num_ip_blocks; i++) {
2767                 if (!adev->ip_blocks[i].status.hw)
2768                         continue;
2769                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2770                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2771                         /* XXX handle errors */
2772                         if (r) {
2773                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2774                                           adev->ip_blocks[i].version->funcs->name, r);
2775                         }
2776                         adev->ip_blocks[i].status.hw = false;
2777                         break;
2778                 }
2779         }
2780 }
2781
2782 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2783 {
2784         int i, r;
2785
2786         for (i = 0; i < adev->num_ip_blocks; i++) {
2787                 if (!adev->ip_blocks[i].version->funcs->early_fini)
2788                         continue;
2789
2790                 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2791                 if (r) {
2792                         DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2793                                   adev->ip_blocks[i].version->funcs->name, r);
2794                 }
2795         }
2796
2797         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2798         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2799
2800         amdgpu_amdkfd_suspend(adev, false);
2801
2802         /* Workaround for ASICs that need to disable the SMC first */
2803         amdgpu_device_smu_fini_early(adev);
2804
2805         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2806                 if (!adev->ip_blocks[i].status.hw)
2807                         continue;
2808
2809                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2810                 /* XXX handle errors */
2811                 if (r) {
2812                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2813                                   adev->ip_blocks[i].version->funcs->name, r);
2814                 }
2815
2816                 adev->ip_blocks[i].status.hw = false;
2817         }
2818
2819         if (amdgpu_sriov_vf(adev)) {
2820                 if (amdgpu_virt_release_full_gpu(adev, false))
2821                         DRM_ERROR("failed to release exclusive mode on fini\n");
2822         }
2823
2824         return 0;
2825 }
2826
2827 /**
2828  * amdgpu_device_ip_fini - run fini for hardware IPs
2829  *
2830  * @adev: amdgpu_device pointer
2831  *
2832  * Main teardown pass for hardware IPs.  The list of all the hardware
2833  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2834  * are run.  hw_fini tears down the hardware associated with each IP
2835  * and sw_fini tears down any software state associated with each IP.
2836  * Returns 0 on success, negative error code on failure.
2837  */
2838 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2839 {
2840         int i, r;
2841
2842         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2843                 amdgpu_virt_release_ras_err_handler_data(adev);
2844
2845         if (adev->gmc.xgmi.num_physical_nodes > 1)
2846                 amdgpu_xgmi_remove_device(adev);
2847
2848         amdgpu_amdkfd_device_fini_sw(adev);
2849
2850         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2851                 if (!adev->ip_blocks[i].status.sw)
2852                         continue;
2853
2854                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2855                         amdgpu_ucode_free_bo(adev);
2856                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2857                         amdgpu_device_wb_fini(adev);
2858                         amdgpu_device_vram_scratch_fini(adev);
2859                         amdgpu_ib_pool_fini(adev);
2860                 }
2861
2862                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2863                 /* XXX handle errors */
2864                 if (r) {
2865                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2866                                   adev->ip_blocks[i].version->funcs->name, r);
2867                 }
2868                 adev->ip_blocks[i].status.sw = false;
2869                 adev->ip_blocks[i].status.valid = false;
2870         }
2871
2872         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2873                 if (!adev->ip_blocks[i].status.late_initialized)
2874                         continue;
2875                 if (adev->ip_blocks[i].version->funcs->late_fini)
2876                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2877                 adev->ip_blocks[i].status.late_initialized = false;
2878         }
2879
2880         amdgpu_ras_fini(adev);
2881
2882         return 0;
2883 }
2884
2885 /**
2886  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2887  *
2888  * @work: work_struct.
2889  */
2890 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2891 {
2892         struct amdgpu_device *adev =
2893                 container_of(work, struct amdgpu_device, delayed_init_work.work);
2894         int r;
2895
2896         r = amdgpu_ib_ring_tests(adev);
2897         if (r)
2898                 DRM_ERROR("ib ring test failed (%d).\n", r);
2899 }
2900
2901 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2902 {
2903         struct amdgpu_device *adev =
2904                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2905
2906         WARN_ON_ONCE(adev->gfx.gfx_off_state);
2907         WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2908
2909         if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2910                 adev->gfx.gfx_off_state = true;
2911 }
2912
2913 /**
2914  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2915  *
2916  * @adev: amdgpu_device pointer
2917  *
2918  * Main suspend function for hardware IPs (phase 1).  The list of all the
2919  * hardware IPs that make up the asic is walked, clockgating is disabled
2920  * and the suspend callbacks are run for the display (DCE) blocks only.
2921  * suspend puts each display IP into a state suitable for suspend.
2922  * Returns 0 on success, negative error code on failure.
2923  */
2924 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2925 {
2926         int i, r;
2927
2928         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2929         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2930
2931         /*
2932          * Per the PMFW team's suggestion, the driver needs to disable the
2933          * gfxoff and df cstate features for gpu reset (e.g. Mode1Reset)
2934          * scenarios. Add the missing df cstate disablement here.
2935          */
2936         if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
2937                 dev_warn(adev->dev, "Failed to disallow df cstate\n");
2938
2939         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2940                 if (!adev->ip_blocks[i].status.valid)
2941                         continue;
2942
2943                 /* displays are handled separately */
2944                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2945                         continue;
2946
2947                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2950                 if (r) {
2951                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2952                                   adev->ip_blocks[i].version->funcs->name, r);
2953                         return r;
2954                 }
2955
2956                 adev->ip_blocks[i].status.hw = false;
2957         }
2958
2959         return 0;
2960 }
2961
2962 /**
2963  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2964  *
2965  * @adev: amdgpu_device pointer
2966  *
2967  * Main suspend function for hardware IPs (phase 2).  The list of all the
2968  * hardware IPs that make up the asic is walked and the suspend callbacks
2969  * are run for every block except the displays, which are handled in
2970  * phase 1.  suspend puts each IP into a state suitable for suspend.
2971  * Returns 0 on success, negative error code on failure.
2972  */
2973 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2974 {
2975         int i, r;
2976
2977         if (adev->in_s0ix)
2978                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2979
2980         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2981                 if (!adev->ip_blocks[i].status.valid)
2982                         continue;
2983                 /* displays are handled in phase1 */
2984                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2985                         continue;
2986                 /* PSP lost connection when err_event_athub occurs */
2987                 if (amdgpu_ras_intr_triggered() &&
2988                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2989                         adev->ip_blocks[i].status.hw = false;
2990                         continue;
2991                 }
2992
2993                 /* skip unnecessary suspend if we have not initialized them yet */
2994                 if (adev->gmc.xgmi.pending_reset &&
2995                     !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2996                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2997                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2998                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2999                         adev->ip_blocks[i].status.hw = false;
3000                         continue;
3001                 }
3002
3003                 /* skip suspend of gfx and psp for S0ix:
3004                  * gfx is in the gfxoff state, so on resume it will exit gfxoff
3005                  * just like at runtime. PSP is also part of the always-on
3006                  * hardware, so there is no need to suspend it.
3007                  */
3008                 if (adev->in_s0ix &&
3009                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3010                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
3011                         continue;
3012
3013                 /* XXX handle errors */
3014                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3016                 if (r) {
3017                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
3018                                   adev->ip_blocks[i].version->funcs->name, r);
3019                 }
3020                 adev->ip_blocks[i].status.hw = false;
3021                 /* handle putting the SMC in the appropriate state */
3022                 if (!amdgpu_sriov_vf(adev)) {
3023                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3024                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3025                                 if (r) {
3026                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3027                                                         adev->mp1_state, r);
3028                                         return r;
3029                                 }
3030                         }
3031                 }
3032         }
3033
3034         return 0;
3035 }
3036
3037 /**
3038  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3039  *
3040  * @adev: amdgpu_device pointer
3041  *
3042  * Main suspend function for hardware IPs.  The list of all the hardware
3043  * IPs that make up the asic is walked, clockgating is disabled and the
3044  * suspend callbacks are run.  suspend puts the hardware and software state
3045  * in each IP into a state suitable for suspend.
3046  * Returns 0 on success, negative error code on failure.
3047  */
3048 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3049 {
3050         int r;
3051
3052         if (amdgpu_sriov_vf(adev)) {
3053                 amdgpu_virt_fini_data_exchange(adev);
3054                 amdgpu_virt_request_full_gpu(adev, false);
3055         }
3056
3057         r = amdgpu_device_ip_suspend_phase1(adev);
3058         if (r)
3059                 return r;
3060         r = amdgpu_device_ip_suspend_phase2(adev);
3061
3062         if (amdgpu_sriov_vf(adev))
3063                 amdgpu_virt_release_full_gpu(adev, false);
3064
3065         return r;
3066 }
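
/*
 * A minimal sketch of the suspend ordering implemented above, assuming a
 * bare-metal (non-SR-IOV) device:
 *
 *   amdgpu_device_ip_suspend_phase1(adev); // ungate CG/PG, suspend displays
 *   amdgpu_device_ip_suspend_phase2(adev); // suspend every other IP block
 *
 * On SR-IOV the two phases are bracketed by request/release of full GPU
 * access so the host does not preempt the VF mid-suspend.
 */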
3067
3068 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3069 {
3070         int i, r;
3071
3072         static enum amd_ip_block_type ip_order[] = {
3073                 AMD_IP_BLOCK_TYPE_COMMON,
3074                 AMD_IP_BLOCK_TYPE_GMC,
3075                 AMD_IP_BLOCK_TYPE_PSP,
3076                 AMD_IP_BLOCK_TYPE_IH,
3077         };
3078
3079         for (i = 0; i < adev->num_ip_blocks; i++) {
3080                 int j;
3081                 struct amdgpu_ip_block *block;
3082
3083                 block = &adev->ip_blocks[i];
3084                 block->status.hw = false;
3085
3086                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3087
3088                         if (block->version->type != ip_order[j] ||
3089                                 !block->status.valid)
3090                                 continue;
3091
3092                         r = block->version->funcs->hw_init(adev);
3093                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3094                         if (r)
3095                                 return r;
3096                         block->status.hw = true;
3097                 }
3098         }
3099
3100         return 0;
3101 }
3102
3103 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3104 {
3105         int i, r;
3106
3107         static enum amd_ip_block_type ip_order[] = {
3108                 AMD_IP_BLOCK_TYPE_SMC,
3109                 AMD_IP_BLOCK_TYPE_DCE,
3110                 AMD_IP_BLOCK_TYPE_GFX,
3111                 AMD_IP_BLOCK_TYPE_SDMA,
3112                 AMD_IP_BLOCK_TYPE_UVD,
3113                 AMD_IP_BLOCK_TYPE_VCE,
3114                 AMD_IP_BLOCK_TYPE_VCN
3115         };
3116
3117         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3118                 int j;
3119                 struct amdgpu_ip_block *block;
3120
3121                 for (j = 0; j < adev->num_ip_blocks; j++) {
3122                         block = &adev->ip_blocks[j];
3123
3124                         if (block->version->type != ip_order[i] ||
3125                                 !block->status.valid ||
3126                                 block->status.hw)
3127                                 continue;
3128
3129                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3130                                 r = block->version->funcs->resume(adev);
3131                         else
3132                                 r = block->version->funcs->hw_init(adev);
3133
3134                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3135                         if (r)
3136                                 return r;
3137                         block->status.hw = true;
3138                 }
3139         }
3140
3141         return 0;
3142 }
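
/*
 * A minimal trace of the SR-IOV re-init ordering, assuming every listed
 * block is present and valid (a hypothetical ASIC, for illustration):
 *
 *   early: COMMON -> GMC -> PSP -> IH                        (hw_init)
 *   late:  SMC -> DCE -> GFX -> SDMA -> UVD -> VCE -> VCN
 *
 * SMC is brought back with resume() rather than hw_init() in the late
 * pass; all of the other blocks are fully re-initialized.
 */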
3143
3144 /**
3145  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3146  *
3147  * @adev: amdgpu_device pointer
3148  *
3149  * First resume function for hardware IPs.  The list of all the hardware
3150  * IPs that make up the asic is walked and the resume callbacks are run for
3151  * COMMON, GMC, and IH (plus PSP under SR-IOV).  resume puts the hardware
3152  * into a functional state after a suspend and updates the software state
3153  * as necessary.  This function is also used for restoring the GPU after a GPU reset.
3154  * Returns 0 on success, negative error code on failure.
3155  */
3156 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3157 {
3158         int i, r;
3159
3160         for (i = 0; i < adev->num_ip_blocks; i++) {
3161                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3162                         continue;
3163                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3164                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3165                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3166                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3167
3168                         r = adev->ip_blocks[i].version->funcs->resume(adev);
3169                         if (r) {
3170                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
3171                                           adev->ip_blocks[i].version->funcs->name, r);
3172                                 return r;
3173                         }
3174                         adev->ip_blocks[i].status.hw = true;
3175                 }
3176         }
3177
3178         return 0;
3179 }
3180
3181 /**
3182  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3183  *
3184  * @adev: amdgpu_device pointer
3185  *
3186  * Second resume function for hardware IPs.  The list of all the hardware
3187  * IPs that make up the asic is walked and the resume callbacks are run for
3188  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3189  * functional state after a suspend and updates the software state as
3190  * necessary.  This function is also used for restoring the GPU after a GPU
3191  * reset.
3192  * Returns 0 on success, negative error code on failure.
3193  */
3194 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3195 {
3196         int i, r;
3197
3198         for (i = 0; i < adev->num_ip_blocks; i++) {
3199                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3200                         continue;
3201                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3202                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3203                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3204                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3205                         continue;
3206                 r = adev->ip_blocks[i].version->funcs->resume(adev);
3207                 if (r) {
3208                         DRM_ERROR("resume of IP block <%s> failed %d\n",
3209                                   adev->ip_blocks[i].version->funcs->name, r);
3210                         return r;
3211                 }
3212                 adev->ip_blocks[i].status.hw = true;
3213         }
3214
3215         return 0;
3216 }
3217
3218 /**
3219  * amdgpu_device_ip_resume - run resume for hardware IPs
3220  *
3221  * @adev: amdgpu_device pointer
3222  *
3223  * Main resume function for hardware IPs.  The hardware IPs
3224  * are split into two resume functions because they are
3225  * also used in recovering from a GPU reset, and some additional
3226  * steps need to be taken between them.  In this case (S3/S4) they are
3227  * run sequentially.
3228  * Returns 0 on success, negative error code on failure.
3229  */
3230 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3231 {
3232         int r;
3233
3234         r = amdgpu_amdkfd_resume_iommu(adev);
3235         if (r)
3236                 return r;
3237
3238         r = amdgpu_device_ip_resume_phase1(adev);
3239         if (r)
3240                 return r;
3241
3242         r = amdgpu_device_fw_loading(adev);
3243         if (r)
3244                 return r;
3245
3246         r = amdgpu_device_ip_resume_phase2(adev);
3247
3248         return r;
3249 }
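
/*
 * A minimal sketch of the resume sequence above.  Firmware loading sits
 * between the two phases because the later blocks (GFX, SDMA, ...)
 * expect their microcode to be loaded before their resume callbacks run:
 *
 *   amdgpu_amdkfd_resume_iommu(adev);
 *   amdgpu_device_ip_resume_phase1(adev); // COMMON, GMC, IH (+PSP on VF)
 *   amdgpu_device_fw_loading(adev);       // bring up PSP / load microcode
 *   amdgpu_device_ip_resume_phase2(adev); // everything else
 */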
3250
3251 /**
3252  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3253  *
3254  * @adev: amdgpu_device pointer
3255  *
3256  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3257  */
3258 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3259 {
3260         if (amdgpu_sriov_vf(adev)) {
3261                 if (adev->is_atom_fw) {
3262                         if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3263                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3264                 } else {
3265                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3266                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3267                 }
3268
3269                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3270                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3271         }
3272 }
3273
3274 /**
3275  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3276  *
3277  * @asic_type: AMD asic type
3278  *
3279  * Check if there is DC (new modesetting infrastructure) support for an asic.
3280  * returns true if DC has support, false if not.
3281  */
3282 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3283 {
3284         switch (asic_type) {
3285 #ifdef CONFIG_DRM_AMDGPU_SI
3286         case CHIP_HAINAN:
3287 #endif
3288         case CHIP_TOPAZ:
3289                 /* chips with no display hardware */
3290                 return false;
3291 #if defined(CONFIG_DRM_AMD_DC)
3292         case CHIP_TAHITI:
3293         case CHIP_PITCAIRN:
3294         case CHIP_VERDE:
3295         case CHIP_OLAND:
3296                 /*
3297                  * We have systems in the wild with these ASICs that require
3298                  * LVDS and VGA support which is not supported with DC.
3299                  *
3300                  * Fall back to the non-DC driver here by default so as not to
3301                  * cause regressions.
3302                  */
3303 #if defined(CONFIG_DRM_AMD_DC_SI)
3304                 return amdgpu_dc > 0;
3305 #else
3306                 return false;
3307 #endif
3308         case CHIP_BONAIRE:
3309         case CHIP_KAVERI:
3310         case CHIP_KABINI:
3311         case CHIP_MULLINS:
3312                 /*
3313                  * We have systems in the wild with these ASICs that require
3314                  * VGA support which is not supported with DC.
3315                  *
3316                  * Fall back to the non-DC driver here by default so as not to
3317                  * cause regressions.
3318                  */
3319                 return amdgpu_dc > 0;
3320         default:
3321                 return amdgpu_dc != 0;
3322 #else
3323         default:
3324                 if (amdgpu_dc > 0)
3325                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3326                                          "but isn't supported by ASIC, ignoring\n");
3327                 return false;
3328 #endif
3329         }
3330 }
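
/*
 * The amdgpu_dc module parameter drives the checks above: the default (-1)
 * counts as "auto", so "amdgpu_dc != 0" enables DC wherever it is
 * supported, while the legacy LVDS/VGA boards require an explicit opt-in
 * ("amdgpu_dc > 0").  A sketch of forcing DC on one of those boards:
 *
 *   modprobe amdgpu dc=1
 */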
3331
3332 /**
3333  * amdgpu_device_has_dc_support - check if dc is supported
3334  *
3335  * @adev: amdgpu_device pointer
3336  *
3337  * Returns true for supported, false for not supported
3338  */
3339 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3340 {
3341         if (amdgpu_sriov_vf(adev) ||
3342             adev->enable_virtual_display ||
3343             (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3344                 return false;
3345
3346         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3347 }
3348
3349 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3350 {
3351         struct amdgpu_device *adev =
3352                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3353         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3354
3355         /* It's a bug to not have a hive within this function */
3356         if (WARN_ON(!hive))
3357                 return;
3358
3359         /*
3360          * Use task barrier to synchronize all xgmi reset works across the
3361          * hive. task_barrier_enter and task_barrier_exit will block
3362          * until all the threads running the xgmi reset works reach
3363          * those points. task_barrier_full will do both blocks.
3364          */
3365         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3366
3367                 task_barrier_enter(&hive->tb);
3368                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3369
3370                 if (adev->asic_reset_res)
3371                         goto fail;
3372
3373                 task_barrier_exit(&hive->tb);
3374                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3375
3376                 if (adev->asic_reset_res)
3377                         goto fail;
3378
3379                 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3380                     adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3381                         adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3382         } else {
3383
3384                 task_barrier_full(&hive->tb);
3385                 adev->asic_reset_res = amdgpu_asic_reset(adev);
3386         }
3387
3388 fail:
3389         if (adev->asic_reset_res)
3390                 DRM_WARN("ASIC reset failed with error %d for drm dev %s",
3391                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3392         amdgpu_put_xgmi_hive(hive);
3393 }
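
/*
 * A minimal sketch of the task_barrier semantics relied on above,
 * assuming two devices in the hive each running this work item:
 *
 *   task_barrier_enter(&hive->tb); // blocks until both workers arrive
 *   ...BACO enter on each device...
 *   task_barrier_exit(&hive->tb);  // blocks again before BACO exit
 *
 * task_barrier_full() is the same rendezvous done twice back to back,
 * which is all the non-BACO reset path needs.
 */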
3394
3395 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3396 {
3397         char *input = amdgpu_lockup_timeout;
3398         char *timeout_setting = NULL;
3399         int index = 0;
3400         long timeout;
3401         int ret = 0;
3402
3403         /*
3404          * By default the timeout for non-compute jobs is 10000 ms
3405          * and 60000 ms for compute jobs.
3406          * In SR-IOV mode, compute jobs keep the 60000 ms default only
3407          * in one-VF (pp_one_vf) mode; otherwise they use 10000 ms.
3408          */
3409         adev->gfx_timeout = msecs_to_jiffies(10000);
3410         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3411         if (amdgpu_sriov_vf(adev))
3412                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3413                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3414         else
3415                 adev->compute_timeout = msecs_to_jiffies(60000);
3416
3417         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3418                 while ((timeout_setting = strsep(&input, ",")) &&
3419                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3420                         ret = kstrtol(timeout_setting, 0, &timeout);
3421                         if (ret)
3422                                 return ret;
3423
3424                         if (timeout == 0) {
3425                                 index++;
3426                                 continue;
3427                         } else if (timeout < 0) {
3428                                 timeout = MAX_SCHEDULE_TIMEOUT;
3429                                 dev_warn(adev->dev, "lockup timeout disabled");
3430                                 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3431                         } else {
3432                                 timeout = msecs_to_jiffies(timeout);
3433                         }
3434
3435                         switch (index++) {
3436                         case 0:
3437                                 adev->gfx_timeout = timeout;
3438                                 break;
3439                         case 1:
3440                                 adev->compute_timeout = timeout;
3441                                 break;
3442                         case 2:
3443                                 adev->sdma_timeout = timeout;
3444                                 break;
3445                         case 3:
3446                                 adev->video_timeout = timeout;
3447                                 break;
3448                         default:
3449                                 break;
3450                         }
3451                 }
3452                 /*
3453                  * There is only one value specified and
3454                  * it should apply to all non-compute jobs.
3455                  */
3456                 if (index == 1) {
3457                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3458                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3459                                 adev->compute_timeout = adev->gfx_timeout;
3460                 }
3461         }
3462
3463         return ret;
3464 }
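
/*
 * A usage sketch for the parsing above: the comma separated values map
 * to gfx, compute, sdma and video in that order, 0 keeps the default
 * and a negative value disables the timeout entirely:
 *
 *   modprobe amdgpu lockup_timeout=10000,60000,10000,10000
 *   modprobe amdgpu lockup_timeout=5000   (applies to all non-compute)
 */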
3465
3466 /**
3467  * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3468  *
3469  * @adev: amdgpu_device pointer
3470  *
3471  * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3472  */
3473 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3474 {
3475         struct iommu_domain *domain;
3476
3477         domain = iommu_get_domain_for_dev(adev->dev);
3478         if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3479                 adev->ram_is_direct_mapped = true;
3480 }
3481
3482 static const struct attribute *amdgpu_dev_attributes[] = {
3483         &dev_attr_product_name.attr,
3484         &dev_attr_product_number.attr,
3485         &dev_attr_serial_number.attr,
3486         &dev_attr_pcie_replay_count.attr,
3487         NULL
3488 };
3489
3490 /**
3491  * amdgpu_device_init - initialize the driver
3492  *
3493  * @adev: amdgpu_device pointer
3494  * @flags: driver flags
3495  *
3496  * Initializes the driver info and hw (all asics).
3497  * Returns 0 for success or an error on failure.
3498  * Called at driver startup.
3499  */
3500 int amdgpu_device_init(struct amdgpu_device *adev,
3501                        uint32_t flags)
3502 {
3503         struct drm_device *ddev = adev_to_drm(adev);
3504         struct pci_dev *pdev = adev->pdev;
3505         int r, i;
3506         bool px = false;
3507         u32 max_MBps;
3508
3509         adev->shutdown = false;
3510         adev->flags = flags;
3511
3512         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3513                 adev->asic_type = amdgpu_force_asic_type;
3514         else
3515                 adev->asic_type = flags & AMD_ASIC_MASK;
3516
3517         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3518         if (amdgpu_emu_mode == 1)
3519                 adev->usec_timeout *= 10;
3520         adev->gmc.gart_size = 512 * 1024 * 1024;
3521         adev->accel_working = false;
3522         adev->num_rings = 0;
3523         RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3524         adev->mman.buffer_funcs = NULL;
3525         adev->mman.buffer_funcs_ring = NULL;
3526         adev->vm_manager.vm_pte_funcs = NULL;
3527         adev->vm_manager.vm_pte_num_scheds = 0;
3528         adev->gmc.gmc_funcs = NULL;
3529         adev->harvest_ip_mask = 0x0;
3530         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3531         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3532
3533         adev->smc_rreg = &amdgpu_invalid_rreg;
3534         adev->smc_wreg = &amdgpu_invalid_wreg;
3535         adev->pcie_rreg = &amdgpu_invalid_rreg;
3536         adev->pcie_wreg = &amdgpu_invalid_wreg;
3537         adev->pciep_rreg = &amdgpu_invalid_rreg;
3538         adev->pciep_wreg = &amdgpu_invalid_wreg;
3539         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3540         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3541         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3542         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3543         adev->didt_rreg = &amdgpu_invalid_rreg;
3544         adev->didt_wreg = &amdgpu_invalid_wreg;
3545         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3546         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3547         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3548         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3549
3550         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3551                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3552                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3553
3554         /* mutex initializations are all done here so we
3555          * can call these functions again without locking issues */
3556         mutex_init(&adev->firmware.mutex);
3557         mutex_init(&adev->pm.mutex);
3558         mutex_init(&adev->gfx.gpu_clock_mutex);
3559         mutex_init(&adev->srbm_mutex);
3560         mutex_init(&adev->gfx.pipe_reserve_mutex);
3561         mutex_init(&adev->gfx.gfx_off_mutex);
3562         mutex_init(&adev->grbm_idx_mutex);
3563         mutex_init(&adev->mn_lock);
3564         mutex_init(&adev->virt.vf_errors.lock);
3565         hash_init(adev->mn_hash);
3566         mutex_init(&adev->psp.mutex);
3567         mutex_init(&adev->notifier_lock);
3568         mutex_init(&adev->pm.stable_pstate_ctx_lock);
3569         mutex_init(&adev->benchmark_mutex);
3570
3571         amdgpu_device_init_apu_flags(adev);
3572
3573         r = amdgpu_device_check_arguments(adev);
3574         if (r)
3575                 return r;
3576
3577         spin_lock_init(&adev->mmio_idx_lock);
3578         spin_lock_init(&adev->smc_idx_lock);
3579         spin_lock_init(&adev->pcie_idx_lock);
3580         spin_lock_init(&adev->uvd_ctx_idx_lock);
3581         spin_lock_init(&adev->didt_idx_lock);
3582         spin_lock_init(&adev->gc_cac_idx_lock);
3583         spin_lock_init(&adev->se_cac_idx_lock);
3584         spin_lock_init(&adev->audio_endpt_idx_lock);
3585         spin_lock_init(&adev->mm_stats.lock);
3586
3587         INIT_LIST_HEAD(&adev->shadow_list);
3588         mutex_init(&adev->shadow_list_lock);
3589
3590         INIT_LIST_HEAD(&adev->reset_list);
3591
3592         INIT_LIST_HEAD(&adev->ras_list);
3593
3594         INIT_DELAYED_WORK(&adev->delayed_init_work,
3595                           amdgpu_device_delayed_init_work_handler);
3596         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3597                           amdgpu_device_delay_enable_gfx_off);
3598
3599         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3600
3601         adev->gfx.gfx_off_req_count = 1;
3602         adev->gfx.gfx_off_residency = 0;
3603         adev->gfx.gfx_off_entrycount = 0;
3604         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3605
3606         atomic_set(&adev->throttling_logging_enabled, 1);
3607         /*
3608          * If throttling continues, logging will be performed every minute
3609          * to avoid log flooding. "-1" is subtracted since the thermal
3610          * throttling interrupt comes every second. Thus, the total logging
3611          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3612          * for the throttling interrupt) = 60 seconds.
3613          */
3614         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3615         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3616
3617         /* Register mappings */
3618         /* TODO: block userspace mapping of io register */
3619         if (adev->asic_type >= CHIP_BONAIRE) {
3620                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3621                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3622         } else {
3623                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3624                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3625         }
3626
3627         for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3628                 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3629
3630         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3631         if (!adev->rmmio)
3632                 return -ENOMEM;
3634         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3635         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3636
3637         amdgpu_device_get_pcie_info(adev);
3638
3639         if (amdgpu_mcbp)
3640                 DRM_INFO("MCBP is enabled\n");
3641
3642         /*
3643          * The reset domain needs to be present early, before any XGMI hive is
3644          * discovered and initialized, since it uses the reset semaphore and the
3645          * in_gpu_reset flag early on during init and before any call to RREG32.
3646          */
3647         adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3648         if (!adev->reset_domain)
3649                 return -ENOMEM;
3650
3651         /* detect hw virtualization here */
3652         amdgpu_detect_virtualization(adev);
3653
3654         r = amdgpu_device_get_job_timeout_settings(adev);
3655         if (r) {
3656                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3657                 return r;
3658         }
3659
3660         /* early init functions */
3661         r = amdgpu_device_ip_early_init(adev);
3662         if (r)
3663                 return r;
3664
3665         /* Enable TMZ based on IP_VERSION */
3666         amdgpu_gmc_tmz_set(adev);
3667
3668         amdgpu_gmc_noretry_set(adev);
3669         /* Need to get xgmi info early to decide the reset behavior */
3670         if (adev->gmc.xgmi.supported) {
3671                 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3672                 if (r)
3673                         return r;
3674         }
3675
3676         /* enable PCIE atomic ops */
3677         if (amdgpu_sriov_vf(adev))
3678                 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3679                         adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3680                         (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3681         else
3682                 adev->have_atomics_support =
3683                         !pci_enable_atomic_ops_to_root(adev->pdev,
3684                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3685                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3686         if (!adev->have_atomics_support)
3687                 dev_info(adev->dev, "PCIE atomic ops are not supported\n");
3688
3689         /* doorbell bar mapping and doorbell index init */
3690         amdgpu_device_doorbell_init(adev);
3691
3692         if (amdgpu_emu_mode == 1) {
3693                 /* post the asic in emulation mode */
3694                 emu_soc_asic_init(adev);
3695                 goto fence_driver_init;
3696         }
3697
3698         amdgpu_reset_init(adev);
3699
3700         /* detect if we have an SR-IOV vbios */
3701         amdgpu_device_detect_sriov_bios(adev);
3702
3703         /* check if we need to reset the asic
3704          *  e.g., the driver was not cleanly unloaded previously.
3705          */
3706         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3707                 if (adev->gmc.xgmi.num_physical_nodes) {
3708                         dev_info(adev->dev, "Pending hive reset.\n");
3709                         adev->gmc.xgmi.pending_reset = true;
3710                         /* Only need to init the necessary blocks for SMU to handle the reset */
3711                         for (i = 0; i < adev->num_ip_blocks; i++) {
3712                                 if (!adev->ip_blocks[i].status.valid)
3713                                         continue;
3714                                 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3715                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3716                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3717                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3718                                         DRM_DEBUG("IP %s disabled for hw_init.\n",
3719                                                 adev->ip_blocks[i].version->funcs->name);
3720                                         adev->ip_blocks[i].status.hw = true;
3721                                 }
3722                         }
3723                 } else {
3724                         r = amdgpu_asic_reset(adev);
3725                         if (r) {
3726                                 dev_err(adev->dev, "asic reset on init failed\n");
3727                                 goto failed;
3728                         }
3729                 }
3730         }
3731
3732         pci_enable_pcie_error_reporting(adev->pdev);
3733
3734         /* Post card if necessary */
3735         if (amdgpu_device_need_post(adev)) {
3736                 if (!adev->bios) {
3737                         dev_err(adev->dev, "no vBIOS found\n");
3738                         r = -EINVAL;
3739                         goto failed;
3740                 }
3741                 DRM_INFO("GPU posting now...\n");
3742                 r = amdgpu_device_asic_init(adev);
3743                 if (r) {
3744                         dev_err(adev->dev, "gpu post error!\n");
3745                         goto failed;
3746                 }
3747         }
3748
3749         if (adev->is_atom_fw) {
3750                 /* Initialize clocks */
3751                 r = amdgpu_atomfirmware_get_clock_info(adev);
3752                 if (r) {
3753                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3754                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3755                         goto failed;
3756                 }
3757         } else {
3758                 /* Initialize clocks */
3759                 r = amdgpu_atombios_get_clock_info(adev);
3760                 if (r) {
3761                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3762                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3763                         goto failed;
3764                 }
3765                 /* init i2c buses */
3766                 if (!amdgpu_device_has_dc_support(adev))
3767                         amdgpu_atombios_i2c_init(adev);
3768         }
3769
3770 fence_driver_init:
3771         /* Fence driver */
3772         r = amdgpu_fence_driver_sw_init(adev);
3773         if (r) {
3774                 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3775                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3776                 goto failed;
3777         }
3778
3779         /* init the mode config */
3780         drm_mode_config_init(adev_to_drm(adev));
3781
3782         r = amdgpu_device_ip_init(adev);
3783         if (r) {
3784                 /* failed in exclusive mode due to timeout */
3785                 if (amdgpu_sriov_vf(adev) &&
3786                     !amdgpu_sriov_runtime(adev) &&
3787                     amdgpu_virt_mmio_blocked(adev) &&
3788                     !amdgpu_virt_wait_reset(adev)) {
3789                         dev_err(adev->dev, "VF exclusive mode timeout\n");
3790                         /* Don't send request since VF is inactive. */
3791                         adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3792                         adev->virt.ops = NULL;
3793                         r = -EAGAIN;
3794                         goto release_ras_con;
3795                 }
3796                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3797                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3798                 goto release_ras_con;
3799         }
3800
3801         amdgpu_fence_driver_hw_init(adev);
3802
3803         dev_info(adev->dev,
3804                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3805                         adev->gfx.config.max_shader_engines,
3806                         adev->gfx.config.max_sh_per_se,
3807                         adev->gfx.config.max_cu_per_sh,
3808                         adev->gfx.cu_info.number);
3809
3810         adev->accel_working = true;
3811
3812         amdgpu_vm_check_compute_bug(adev);
3813
3814         /* Initialize the buffer migration limit. */
3815         if (amdgpu_moverate >= 0)
3816                 max_MBps = amdgpu_moverate;
3817         else
3818                 max_MBps = 8; /* Allow 8 MB/s. */
3819         /* Get a log2 for easy divisions. */
3820         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3821
3822         r = amdgpu_pm_sysfs_init(adev);
3823         if (r) {
3824                 adev->pm_sysfs_en = false;
3825                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3826         } else
3827                 adev->pm_sysfs_en = true;
3828
3829         r = amdgpu_ucode_sysfs_init(adev);
3830         if (r) {
3831                 adev->ucode_sysfs_en = false;
3832                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3833         } else
3834                 adev->ucode_sysfs_en = true;
3835
3836         r = amdgpu_psp_sysfs_init(adev);
3837         if (r) {
3838                 adev->psp_sysfs_en = false;
3839                 if (!amdgpu_sriov_vf(adev))
3840                         DRM_ERROR("Creating psp sysfs failed\n");
3841         } else
3842                 adev->psp_sysfs_en = true;
3843
3844         /*
3845          * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3846          * Otherwise the mgpu fan boost feature will be skipped because
3847          * the gpu instance count would not include this device yet.
3848          */
3849         amdgpu_register_gpu_instance(adev);
3850
3851         /* enable clockgating, etc. after ib tests, since some blocks require
3852          * explicit gating rather than handling it automatically.
3853          */
3854         if (!adev->gmc.xgmi.pending_reset) {
3855                 r = amdgpu_device_ip_late_init(adev);
3856                 if (r) {
3857                         dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3858                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3859                         goto release_ras_con;
3860                 }
3861                 /* must succeed. */
3862                 amdgpu_ras_resume(adev);
3863                 queue_delayed_work(system_wq, &adev->delayed_init_work,
3864                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3865         }
3866
3867         if (amdgpu_sriov_vf(adev))
3868                 flush_delayed_work(&adev->delayed_init_work);
3869
3870         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3871         if (r)
3872                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3873
3874         if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3875                 r = amdgpu_pmu_init(adev);
3876                 if (r)
3877                         dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3878         }
3878
3879         /* Keep the stored PCI config space at hand for restore after a sudden PCI error */
3880         if (amdgpu_device_cache_pci_state(adev->pdev))
3881                 pci_restore_state(pdev);
3882
3883         /* if we have more than one VGA card, then disable the amdgpu VGA resources */
3884         /* this will fail for cards that aren't VGA class devices, just
3885          * ignore it */
3886         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3887                 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3888
3889         if (amdgpu_device_supports_px(ddev)) {
3890                 px = true;
3891                 vga_switcheroo_register_client(adev->pdev,
3892                                                &amdgpu_switcheroo_ops, px);
3893                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3894         }
3895
3896         if (adev->gmc.xgmi.pending_reset)
3897                 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3898                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3899
3900         amdgpu_device_check_iommu_direct_map(adev);
3901
3902         return 0;
3903
3904 release_ras_con:
3905         amdgpu_release_ras_context(adev);
3906
3907 failed:
3908         amdgpu_vf_error_trans_all(adev);
3909
3910         return r;
3911 }
3912
3913 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3914 {
3916         /* Clear all CPU mappings pointing to this device */
3917         unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3918
3919         /* Unmap all mapped bars - Doorbell, registers and VRAM */
3920         amdgpu_device_doorbell_fini(adev);
3921
3922         iounmap(adev->rmmio);
3923         adev->rmmio = NULL;
3924         if (adev->mman.aper_base_kaddr)
3925                 iounmap(adev->mman.aper_base_kaddr);
3926         adev->mman.aper_base_kaddr = NULL;
3927
3928         /* Memory manager related */
3929         if (!adev->gmc.xgmi.connected_to_cpu) {
3930                 arch_phys_wc_del(adev->gmc.vram_mtrr);
3931                 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3932         }
3933 }
3934
3935 /**
3936  * amdgpu_device_fini_hw - tear down the driver
3937  *
3938  * @adev: amdgpu_device pointer
3939  *
3940  * Tear down the driver info (all asics).
3941  * Called at driver shutdown.
3942  */
3943 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3944 {
3945         dev_info(adev->dev, "amdgpu: finishing device.\n");
3946         flush_delayed_work(&adev->delayed_init_work);
3947         adev->shutdown = true;
3948
3949         /* make sure IB tests have finished before entering exclusive mode
3950          * to avoid preemption on IB tests
3951          */
3952         if (amdgpu_sriov_vf(adev)) {
3953                 amdgpu_virt_request_full_gpu(adev, false);
3954                 amdgpu_virt_fini_data_exchange(adev);
3955         }
3956
3957         /* disable all interrupts */
3958         amdgpu_irq_disable_all(adev);
3959         if (adev->mode_info.mode_config_initialized) {
3960                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3961                         drm_helper_force_disable_all(adev_to_drm(adev));
3962                 else
3963                         drm_atomic_helper_shutdown(adev_to_drm(adev));
3964         }
3965         amdgpu_fence_driver_hw_fini(adev);
3966
3967         if (adev->mman.initialized) {
3968                 flush_delayed_work(&adev->mman.bdev.wq);
3969                 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3970         }
3971
3972         if (adev->pm_sysfs_en)
3973                 amdgpu_pm_sysfs_fini(adev);
3974         if (adev->ucode_sysfs_en)
3975                 amdgpu_ucode_sysfs_fini(adev);
3976         if (adev->psp_sysfs_en)
3977                 amdgpu_psp_sysfs_fini(adev);
3978         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3979
3980         /* RAS features must be disabled before hw fini */
3981         amdgpu_ras_pre_fini(adev);
3982
3983         amdgpu_device_ip_fini_early(adev);
3984
3985         amdgpu_irq_fini_hw(adev);
3986
3987         if (adev->mman.initialized)
3988                 ttm_device_clear_dma_mappings(&adev->mman.bdev);
3989
3990         amdgpu_gart_dummy_page_fini(adev);
3991
3992         amdgpu_device_unmap_mmio(adev);
3993
3994 }
3995
3996 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3997 {
3998         int idx;
3999
4000         amdgpu_fence_driver_sw_fini(adev);
4001         amdgpu_device_ip_fini(adev);
4002         release_firmware(adev->firmware.gpu_info_fw);
4003         adev->firmware.gpu_info_fw = NULL;
4004         adev->accel_working = false;
4005         dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4006
4007         amdgpu_reset_fini(adev);
4008
4009         /* free i2c buses */
4010         if (!amdgpu_device_has_dc_support(adev))
4011                 amdgpu_i2c_fini(adev);
4012
4013         if (amdgpu_emu_mode != 1)
4014                 amdgpu_atombios_fini(adev);
4015
4016         kfree(adev->bios);
4017         adev->bios = NULL;
4018         if (amdgpu_device_supports_px(adev_to_drm(adev))) {
4019                 vga_switcheroo_unregister_client(adev->pdev);
4020                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4021         }
4022         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4023                 vga_client_unregister(adev->pdev);
4024
4025         if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4026
4027                 iounmap(adev->rmmio);
4028                 adev->rmmio = NULL;
4029                 amdgpu_device_doorbell_fini(adev);
4030                 drm_dev_exit(idx);
4031         }
4032
4033         if (IS_ENABLED(CONFIG_PERF_EVENTS))
4034                 amdgpu_pmu_fini(adev);
4035         if (adev->mman.discovery_bin)
4036                 amdgpu_discovery_fini(adev);
4037
4038         amdgpu_reset_put_reset_domain(adev->reset_domain);
4039         adev->reset_domain = NULL;
4040
4041         kfree(adev->pci_state);
4042
4043 }
4044
4045 /**
4046  * amdgpu_device_evict_resources - evict device resources
4047  * @adev: amdgpu device object
4048  *
4049  * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4050  * of the vram memory type. Mainly used for evicting device resources
4051  * at suspend time.
4052  *
4053  */
4054 static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
4055 {
4056         /* No need to evict vram on APUs for suspend to ram or s2idle */
4057         if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4058                 return;
4059
4060         if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
4061                 DRM_WARN("evicting device resources failed\n");
4062
4063 }
4064
4065 /*
4066  * Suspend & resume.
4067  */
4068 /**
4069  * amdgpu_device_suspend - initiate device suspend
4070  *
4071  * @dev: drm dev pointer
4072  * @fbcon: notify the fbdev of suspend
4073  *
4074  * Puts the hw in the suspend state (all asics).
4075  * Returns 0 for success or an error on failure.
4076  * Called at driver suspend.
4077  */
4078 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4079 {
4080         struct amdgpu_device *adev = drm_to_adev(dev);
4081         int r = 0;
4082
4083         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4084                 return 0;
4085
4086         adev->in_suspend = true;
4087
4088         if (amdgpu_sriov_vf(adev)) {
4089                 amdgpu_virt_fini_data_exchange(adev);
4090                 r = amdgpu_virt_request_full_gpu(adev, false);
4091                 if (r)
4092                         return r;
4093         }
4094
4095         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4096                 DRM_WARN("smart shift update failed\n");
4097
4098         drm_kms_helper_poll_disable(dev);
4099
4100         if (fbcon)
4101                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4102
4103         cancel_delayed_work_sync(&adev->delayed_init_work);
4104
4105         amdgpu_ras_suspend(adev);
4106
4107         amdgpu_device_ip_suspend_phase1(adev);
4108
4109         if (!adev->in_s0ix)
4110                 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4111
4112         amdgpu_device_evict_resources(adev);
4113
4114         amdgpu_fence_driver_hw_fini(adev);
4115
4116         amdgpu_device_ip_suspend_phase2(adev);
4117
4118         if (amdgpu_sriov_vf(adev))
4119                 amdgpu_virt_release_full_gpu(adev, false);
4120
4121         return 0;
4122 }
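
/*
 * A condensed view of the ordering above: displays go down first
 * (phase 1), then KFD, then buffers are evicted from VRAM while the
 * fence driver is still up, then the remaining IP blocks (phase 2):
 *
 *   phase1 -> kfd suspend -> evict VRAM -> fence hw_fini -> phase2
 */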
4123
4124 /**
4125  * amdgpu_device_resume - initiate device resume
4126  *
4127  * @dev: drm dev pointer
4128  * @fbcon: notify the fbdev of resume
4129  *
4130  * Bring the hw back to operating state (all asics).
4131  * Returns 0 for success or an error on failure.
4132  * Called at driver resume.
4133  */
4134 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4135 {
4136         struct amdgpu_device *adev = drm_to_adev(dev);
4137         int r = 0;
4138
4139         if (amdgpu_sriov_vf(adev)) {
4140                 r = amdgpu_virt_request_full_gpu(adev, true);
4141                 if (r)
4142                         return r;
4143         }
4144
4145         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4146                 return 0;
4147
4148         if (adev->in_s0ix)
4149                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4150
4151         /* post card */
4152         if (amdgpu_device_need_post(adev)) {
4153                 r = amdgpu_device_asic_init(adev);
4154                 if (r)
4155                         dev_err(adev->dev, "amdgpu asic init failed\n");
4156         }
4157
4158         r = amdgpu_device_ip_resume(adev);
4159
4160         /* no matter what r is, we always need to properly release the full GPU */
4161         if (amdgpu_sriov_vf(adev)) {
4162                 amdgpu_virt_init_data_exchange(adev);
4163                 amdgpu_virt_release_full_gpu(adev, true);
4164         }
4165
4166         if (r) {
4167                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4168                 return r;
4169         }
4170         amdgpu_fence_driver_hw_init(adev);
4171
4172         r = amdgpu_device_ip_late_init(adev);
4173         if (r)
4174                 return r;
4175
4176         queue_delayed_work(system_wq, &adev->delayed_init_work,
4177                            msecs_to_jiffies(AMDGPU_RESUME_MS));
4178
4179         if (!adev->in_s0ix) {
4180                 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4181                 if (r)
4182                         return r;
4183         }
4184
4185         /* Make sure IB tests flushed */
4186         flush_delayed_work(&adev->delayed_init_work);
4187
4188         if (fbcon)
4189                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4190
4191         drm_kms_helper_poll_enable(dev);
4192
4193         amdgpu_ras_resume(adev);
4194
4195         /*
4196          * Most of the connector probing functions try to acquire runtime pm
4197          * refs to ensure that the GPU is powered on when connector polling is
4198          * performed. Since we're calling this from a runtime PM callback,
4199          * trying to acquire rpm refs will cause us to deadlock.
4200          *
4201          * Since we're guaranteed to be holding the rpm lock, it's safe to
4202          * temporarily disable the rpm helpers so this doesn't deadlock us.
4203          */
4204 #ifdef CONFIG_PM
4205         dev->dev->power.disable_depth++;
4206 #endif
4207         if (!amdgpu_device_has_dc_support(adev))
4208                 drm_helper_hpd_irq_event(dev);
4209         else
4210                 drm_kms_helper_hotplug_event(dev);
4211 #ifdef CONFIG_PM
4212         dev->dev->power.disable_depth--;
4213 #endif
4214         adev->in_suspend = false;
4215
4216         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4217                 DRM_WARN("smart shift update failed\n");
4218
4219         return 0;
4220 }
4221
4222 /**
4223  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4224  *
4225  * @adev: amdgpu_device pointer
4226  *
4227  * The list of all the hardware IPs that make up the asic is walked and
4228  * the check_soft_reset callbacks are run.  check_soft_reset determines
4229  * if the asic is still hung or not.
4230  * Returns true if any of the IPs are still in a hung state, false if not.
4231  */
4232 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4233 {
4234         int i;
4235         bool asic_hang = false;
4236
4237         if (amdgpu_sriov_vf(adev))
4238                 return true;
4239
4240         if (amdgpu_asic_need_full_reset(adev))
4241                 return true;
4242
4243         for (i = 0; i < adev->num_ip_blocks; i++) {
4244                 if (!adev->ip_blocks[i].status.valid)
4245                         continue;
4246                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4247                         adev->ip_blocks[i].status.hang =
4248                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4249                 if (adev->ip_blocks[i].status.hang) {
4250                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4251                         asic_hang = true;
4252                 }
4253         }
4254         return asic_hang;
4255 }
4256
4257 /**
4258  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4259  *
4260  * @adev: amdgpu_device pointer
4261  *
4262  * The list of all the hardware IPs that make up the asic is walked and the
4263  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4264  * handles any IP specific hardware or software state changes that are
4265  * necessary for a soft reset to succeed.
4266  * Returns 0 on success, negative error code on failure.
4267  */
4268 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4269 {
4270         int i, r = 0;
4271
4272         for (i = 0; i < adev->num_ip_blocks; i++) {
4273                 if (!adev->ip_blocks[i].status.valid)
4274                         continue;
4275                 if (adev->ip_blocks[i].status.hang &&
4276                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4277                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4278                         if (r)
4279                                 return r;
4280                 }
4281         }
4282
4283         return 0;
4284 }
4285
4286 /**
4287  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4288  *
4289  * @adev: amdgpu_device pointer
4290  *
4291  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4292  * reset is necessary to recover.
4293  * Returns true if a full asic reset is required, false if not.
4294  */
4295 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4296 {
4297         int i;
4298
4299         if (amdgpu_asic_need_full_reset(adev))
4300                 return true;
4301
4302         for (i = 0; i < adev->num_ip_blocks; i++) {
4303                 if (!adev->ip_blocks[i].status.valid)
4304                         continue;
4305                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4306                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4307                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4308                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4309                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) {
4310                         if (adev->ip_blocks[i].status.hang) {
4311                                 dev_info(adev->dev, "Some blocks need a full reset!\n");
4312                                 return true;
4313                         }
4314                 }
4315         }
4316         return false;
4317 }
4318
4319 /**
4320  * amdgpu_device_ip_soft_reset - do a soft reset
4321  *
4322  * @adev: amdgpu_device pointer
4323  *
4324  * The list of all the hardware IPs that make up the asic is walked and the
4325  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4326  * IP specific hardware or software state changes that are necessary to soft
4327  * reset the IP.
4328  * Returns 0 on success, negative error code on failure.
4329  */
4330 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4331 {
4332         int i, r = 0;
4333
4334         for (i = 0; i < adev->num_ip_blocks; i++) {
4335                 if (!adev->ip_blocks[i].status.valid)
4336                         continue;
4337                 if (adev->ip_blocks[i].status.hang &&
4338                     adev->ip_blocks[i].version->funcs->soft_reset) {
4339                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4340                         if (r)
4341                                 return r;
4342                 }
4343         }
4344
4345         return 0;
4346 }
4347
4348 /**
4349  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4350  *
4351  * @adev: amdgpu_device pointer
4352  *
4353  * The list of all the hardware IPs that make up the asic is walked and the
4354  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4355  * handles any IP specific hardware or software state changes that are
4356  * necessary after the IP has been soft reset.
4357  * Returns 0 on success, negative error code on failure.
4358  */
4359 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4360 {
4361         int i, r = 0;
4362
4363         for (i = 0; i < adev->num_ip_blocks; i++) {
4364                 if (!adev->ip_blocks[i].status.valid)
4365                         continue;
4366                 if (adev->ip_blocks[i].status.hang &&
4367                     adev->ip_blocks[i].version->funcs->post_soft_reset)
4368                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4369                 if (r)
4370                         return r;
4371         }
4372
4373         return 0;
4374 }
4375
4376 /**
4377  * amdgpu_device_recover_vram - Recover some VRAM contents
4378  *
4379  * @adev: amdgpu_device pointer
4380  *
4381  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4382  * restore things like GPUVM page tables after a GPU reset where
4383  * the contents of VRAM might be lost.
4384  *
4385  * Returns:
4386  * 0 on success, negative error code on failure.
4387  */
4388 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4389 {
4390         struct dma_fence *fence = NULL, *next = NULL;
4391         struct amdgpu_bo *shadow;
4392         struct amdgpu_bo_vm *vmbo;
4393         long r = 1, tmo;
4394
4395         if (amdgpu_sriov_runtime(adev))
4396                 tmo = msecs_to_jiffies(8000);
4397         else
4398                 tmo = msecs_to_jiffies(100);
4399
4400         dev_info(adev->dev, "recover vram bo from shadow start\n");
4401         mutex_lock(&adev->shadow_list_lock);
4402         list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4403                 shadow = &vmbo->bo;
4404                 /* No need to recover an evicted BO */
4405                 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4406                     shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4407                     shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4408                         continue;
4409
4410                 r = amdgpu_bo_restore_shadow(shadow, &next);
4411                 if (r)
4412                         break;
4413
4414                 if (fence) {
4415                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4416                         dma_fence_put(fence);
4417                         fence = next;
4418                         if (tmo == 0) {
4419                                 r = -ETIMEDOUT;
4420                                 break;
4421                         } else if (tmo < 0) {
4422                                 r = tmo;
4423                                 break;
4424                         }
4425                 } else {
4426                         fence = next;
4427                 }
4428         }
4429         mutex_unlock(&adev->shadow_list_lock);
4430
4431         if (fence)
4432                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4433         dma_fence_put(fence);
4434
4435         if (r < 0 || tmo <= 0) {
4436                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4437                 return -EIO;
4438         }
4439
4440         dev_info(adev->dev, "recover vram bo from shadow done\n");
4441         return 0;
4442 }
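/*
 * Illustrative sketch, not part of this file: stripped of the BO handling,
 * the loop above pipelines the restores by always waiting on the previous
 * copy's fence while the next copy is already queued.  issue_copy(), items
 * and n are hypothetical stand-ins for amdgpu_bo_restore_shadow() and the
 * shadow list walk.
 */
#if 0	/* example only */
	struct dma_fence *fence = NULL, *next = NULL;
	long tmo = msecs_to_jiffies(100);
	int i;

	for (i = 0; i < n; i++) {
		issue_copy(&items[i], &next);	/* queue copy i, get its fence */
		if (fence) {
			/* wait for copy i - 1 while copy i is in flight */
			tmo = dma_fence_wait_timeout(fence, false, tmo);
			dma_fence_put(fence);
		}
		fence = next;
	}
	if (fence)	/* drain the final copy */
		tmo = dma_fence_wait_timeout(fence, false, tmo);
	dma_fence_put(fence);
#endif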
4443
4444
4445 /**
4446  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4447  *
4448  * @adev: amdgpu_device pointer
4449  * @from_hypervisor: request from hypervisor
4450  *
4451  * Do a VF FLR and reinitialize the ASIC.
4452  * Returns 0 on success, negative error code on failure.
4453  */
4454 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4455                                      bool from_hypervisor)
4456 {
4457         int r;
4458         struct amdgpu_hive_info *hive = NULL;
4459         int retry_limit = 0;
4460
4461 retry:
4462         amdgpu_amdkfd_pre_reset(adev);
4463
4464         if (from_hypervisor)
4465                 r = amdgpu_virt_request_full_gpu(adev, true);
4466         else
4467                 r = amdgpu_virt_reset_gpu(adev);
4468         if (r)
4469                 return r;
4470
4471         /* Resume IP prior to SMC */
4472         r = amdgpu_device_ip_reinit_early_sriov(adev);
4473         if (r)
4474                 goto error;
4475
4476         amdgpu_virt_init_data_exchange(adev);
4477
4478         r = amdgpu_device_fw_loading(adev);
4479         if (r)
4480                 return r;
4481
4482         /* now we are okay to resume SMC/CP/SDMA */
4483         r = amdgpu_device_ip_reinit_late_sriov(adev);
4484         if (r)
4485                 goto error;
4486
4487         hive = amdgpu_get_xgmi_hive(adev);
4488         /* Update PSP FW topology after reset */
4489         if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4490                 r = amdgpu_xgmi_update_topology(hive, adev);
4491
4492         if (hive)
4493                 amdgpu_put_xgmi_hive(hive);
4494
4495         if (!r) {
4496                 amdgpu_irq_gpu_reset_resume_helper(adev);
4497                 r = amdgpu_ib_ring_tests(adev);
4498
4499                 amdgpu_amdkfd_post_reset(adev);
4500         }
4501
4502 error:
4503         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4504                 amdgpu_inc_vram_lost(adev);
4505                 r = amdgpu_device_recover_vram(adev);
4506         }
4507         amdgpu_virt_release_full_gpu(adev, true);
4508
4509         if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4510                 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4511                         retry_limit++;
4512                         goto retry;
4513                 } else
4514                         DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4515         }
4516
4517         return r;
4518 }
4519
4520 /**
4521  * amdgpu_device_has_job_running - check if there is any job in the pending list
4522  *
4523  * @adev: amdgpu_device pointer
4524  *
4525  * Check if any ring's scheduler still has a job in its pending list.
4526  */
4527 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4528 {
4529         int i;
4530         struct drm_sched_job *job;
4531
4532         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4533                 struct amdgpu_ring *ring = adev->rings[i];
4534
4535                 if (!ring || !ring->sched.thread)
4536                         continue;
4537
4538                 spin_lock(&ring->sched.job_list_lock);
4539                 job = list_first_entry_or_null(&ring->sched.pending_list,
4540                                                struct drm_sched_job, list);
4541                 spin_unlock(&ring->sched.job_list_lock);
4542                 if (job)
4543                         return true;
4544         }
4545         return false;
4546 }
4547
4548 /**
4549  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4550  *
4551  * @adev: amdgpu_device pointer
4552  *
4553  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4554  * a hung GPU.
4555  */
4556 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4557 {
4558
4559         if (amdgpu_gpu_recovery == 0)
4560                 goto disabled;
4561
4562         if (!amdgpu_device_ip_check_soft_reset(adev)) {
4563                 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4564                 return false;
4565         }
4566
4567         if (amdgpu_sriov_vf(adev))
4568                 return true;
4569
4570         if (amdgpu_gpu_recovery == -1) {
4571                 switch (adev->asic_type) {
4572 #ifdef CONFIG_DRM_AMDGPU_SI
4573                 case CHIP_VERDE:
4574                 case CHIP_TAHITI:
4575                 case CHIP_PITCAIRN:
4576                 case CHIP_OLAND:
4577                 case CHIP_HAINAN:
4578 #endif
4579 #ifdef CONFIG_DRM_AMDGPU_CIK
4580                 case CHIP_KAVERI:
4581                 case CHIP_KABINI:
4582                 case CHIP_MULLINS:
4583 #endif
4584                 case CHIP_CARRIZO:
4585                 case CHIP_STONEY:
4586                 case CHIP_CYAN_SKILLFISH:
4587                         goto disabled;
4588                 default:
4589                         break;
4590                 }
4591         }
4592
4593         return true;
4594
4595 disabled:
4596         dev_info(adev->dev, "GPU recovery disabled.\n");
4597         return false;
4598 }
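/*
 * Note on the amdgpu_gpu_recovery module parameter as consumed in this
 * file: 0 disables recovery outright, -1 ("auto") disables it only on the
 * ASICs listed in the switch above, any other value enables it, and 2
 * additionally enables the guilty-job recheck done by
 * amdgpu_device_recheck_guilty_jobs() during amdgpu_device_gpu_recover().
 */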
4599
4600 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4601 {
4602         u32 i;
4603         int ret = 0;
4604
4605         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4606
4607         dev_info(adev->dev, "GPU mode1 reset\n");
4608
4609         /* disable BM */
4610         pci_clear_master(adev->pdev);
4611
4612         amdgpu_device_cache_pci_state(adev->pdev);
4613
4614         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4615                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4616                 ret = amdgpu_dpm_mode1_reset(adev);
4617         } else {
4618                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4619                 ret = psp_gpu_reset(adev);
4620         }
4621
4622         if (ret)
4623                 dev_err(adev->dev, "GPU mode1 reset failed\n");
4624
4625         amdgpu_device_load_pci_state(adev->pdev);
4626
4627         /* wait for asic to come out of reset */
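        /* while still held in reset, MMIO reads return all 1s, so a memsize
         * of 0xffffffff below means the ASIC has not come back yet */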
4628         for (i = 0; i < adev->usec_timeout; i++) {
4629                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4630
4631                 if (memsize != 0xffffffff)
4632                         break;
4633                 udelay(1);
4634         }
4635
4636         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4637         return ret;
4638 }
4639
4640 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4641                                  struct amdgpu_reset_context *reset_context)
4642 {
4643         int i, r = 0;
4644         struct amdgpu_job *job = NULL;
4645         bool need_full_reset =
4646                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4647
4648         if (reset_context->reset_req_dev == adev)
4649                 job = reset_context->job;
4650
4651         if (amdgpu_sriov_vf(adev)) {
4652                 /* stop the data exchange thread */
4653                 amdgpu_virt_fini_data_exchange(adev);
4654         }
4655
4656         amdgpu_fence_driver_isr_toggle(adev, true);
4657
4658         /* block all schedulers and reset given job's ring */
4659         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4660                 struct amdgpu_ring *ring = adev->rings[i];
4661
4662                 if (!ring || !ring->sched.thread)
4663                         continue;
4664
4665                 /* Clear the job fences from the fence driver so they are not
4666                  * force-completed; the NULL and VM flush fences stay in the fence driver. */
4667                 amdgpu_fence_driver_clear_job_fences(ring);
4668
4669                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4670                 amdgpu_fence_driver_force_completion(ring);
4671         }
4672
4673         amdgpu_fence_driver_isr_toggle(adev, false);
4674
4675         if (job && job->vm)
4676                 drm_sched_increase_karma(&job->base);
4677
4678         r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4679         /* If reset handler not implemented, continue; otherwise return */
4680         if (r == -ENOSYS)
4681                 r = 0;
4682         else
4683                 return r;
4684
4685         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4686         if (!amdgpu_sriov_vf(adev)) {
4687
4688                 if (!need_full_reset)
4689                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4690
4691                 if (!need_full_reset && amdgpu_gpu_recovery) {
4692                         amdgpu_device_ip_pre_soft_reset(adev);
4693                         r = amdgpu_device_ip_soft_reset(adev);
4694                         amdgpu_device_ip_post_soft_reset(adev);
4695                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4696                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4697                                 need_full_reset = true;
4698                         }
4699                 }
4700
4701                 if (need_full_reset) {
4702                         r = amdgpu_device_ip_suspend(adev);
4703                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4704                 } else {
4705                         clear_bit(AMDGPU_NEED_FULL_RESET,
4706                                   &reset_context->flags);
4707                 }
4708         }
4709
4710         return r;
4711 }
4712
4713 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4714 {
4715         int i;
4716
4717         lockdep_assert_held(&adev->reset_domain->sem);
4718
4719         for (i = 0; i < adev->num_regs; i++) {
4720                 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4721                 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4722                                              adev->reset_dump_reg_value[i]);
4723         }
4724
4725         return 0;
4726 }
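/*
 * The offsets in reset_dump_reg_list are user supplied; in kernels of this
 * vintage they can be written through the amdgpu_reset_dump_register_list
 * debugfs file.  The values sampled above are reported both through the
 * trace event and through the devcoredump below.
 */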
4727
4728 #ifdef CONFIG_DEV_COREDUMP
4729 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4730                 size_t count, void *data, size_t datalen)
4731 {
4732         struct drm_printer p;
4733         struct amdgpu_device *adev = data;
4734         struct drm_print_iterator iter;
4735         int i;
4736
4737         iter.data = buffer;
4738         iter.offset = 0;
4739         iter.start = offset;
4740         iter.remain = count;
4741
4742         p = drm_coredump_printer(&iter);
4743
4744         drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4745         drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4746         drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4747         drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4748         if (adev->reset_task_info.pid)
4749                 drm_printf(&p, "process_name: %s PID: %d\n",
4750                            adev->reset_task_info.process_name,
4751                            adev->reset_task_info.pid);
4752
4753         if (adev->reset_vram_lost)
4754                 drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4755         if (adev->num_regs) {
4756                 drm_printf(&p, "AMDGPU register dumps:\nOffset:     Value:\n");
4757
4758                 for (i = 0; i < adev->num_regs; i++)
4759                         drm_printf(&p, "0x%08x: 0x%08x\n",
4760                                    adev->reset_dump_reg_list[i],
4761                                    adev->reset_dump_reg_value[i]);
4762         }
4763
4764         return count - iter.remain;
4765 }
4766
4767 static void amdgpu_devcoredump_free(void *data)
4768 {
4769 }
4770
4771 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4772 {
4773         struct drm_device *dev = adev_to_drm(adev);
4774
4775         ktime_get_ts64(&adev->reset_time);
4776         dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
4777                       amdgpu_devcoredump_read, amdgpu_devcoredump_free);
4778 }
4779 #endif
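/*
 * Once dev_coredumpm() has registered the dump above, userspace can read
 * it back through the devcoredump class device (typically
 * /sys/class/devcoredump/devcd<N>/data) until the devcoredump core times
 * it out or userspace writes to that attribute to free it.
 */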
4780
4781 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4782                          struct amdgpu_reset_context *reset_context)
4783 {
4784         struct amdgpu_device *tmp_adev = NULL;
4785         bool need_full_reset, skip_hw_reset, vram_lost = false;
4786         int r = 0;
4787         bool gpu_reset_for_dev_remove = false;
4788
4789         /* Try reset handler method first */
4790         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4791                                     reset_list);
4792         amdgpu_reset_reg_dumps(tmp_adev);
4793
4794         reset_context->reset_device_list = device_list_handle;
4795         r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4796         /* If reset handler not implemented, continue; otherwise return */
4797         if (r == -ENOSYS)
4798                 r = 0;
4799         else
4800                 return r;
4801
4802         /* Reset handler not implemented, use the default method */
4803         need_full_reset =
4804                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4805         skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4806
4807         gpu_reset_for_dev_remove =
4808                 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
4809                         test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4810
4811         /*
4812          * ASIC reset has to be done on all XGMI hive nodes ASAP
4813          * to allow proper links negotiation in FW (within 1 sec)
4814          */
4815         if (!skip_hw_reset && need_full_reset) {
4816                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4817                         /* For XGMI run all resets in parallel to speed up the process */
4818                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4819                                 tmp_adev->gmc.xgmi.pending_reset = false;
4820                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4821                                         r = -EALREADY;
4822                         } else
4823                                 r = amdgpu_asic_reset(tmp_adev);
4824
4825                         if (r) {
4826                                 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4827                                          r, adev_to_drm(tmp_adev)->unique);
4828                                 break;
4829                         }
4830                 }
4831
4832                 /* For XGMI wait for all resets to complete before proceed */
4833                 if (!r) {
4834                         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4835                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4836                                         flush_work(&tmp_adev->xgmi_reset_work);
4837                                         r = tmp_adev->asic_reset_res;
4838                                         if (r)
4839                                                 break;
4840                                 }
4841                         }
4842                 }
4843         }
4844
4845         if (!r && amdgpu_ras_intr_triggered()) {
4846                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4847                         if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4848                             tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4849                                 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4850                 }
4851
4852                 amdgpu_ras_intr_cleared();
4853         }
4854
4855         /* Since the mode1 reset affects base ip blocks, the
4856          * phase1 ip blocks need to be resumed. Otherwise there
4857          * will be a BIOS signature error and the psp bootloader
4858          * can't load kdb on the next amdgpu install.
4859          */
4860         if (gpu_reset_for_dev_remove) {
4861                 list_for_each_entry(tmp_adev, device_list_handle, reset_list)
4862                         amdgpu_device_ip_resume_phase1(tmp_adev);
4863
4864                 goto end;
4865         }
4866
4867         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4868                 if (need_full_reset) {
4869                         /* post card */
4870                         r = amdgpu_device_asic_init(tmp_adev);
4871                         if (r) {
4872                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
4873                         } else {
4874                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4875                                 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4876                                 if (r)
4877                                         goto out;
4878
4879                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4880                                 if (r)
4881                                         goto out;
4882
4883                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4884 #ifdef CONFIG_DEV_COREDUMP
4885                                 tmp_adev->reset_vram_lost = vram_lost;
4886                                 memset(&tmp_adev->reset_task_info, 0,
4887                                                 sizeof(tmp_adev->reset_task_info));
4888                                 if (reset_context->job && reset_context->job->vm)
4889                                         tmp_adev->reset_task_info =
4890                                                 reset_context->job->vm->task_info;
4891                                 amdgpu_reset_capture_coredumpm(tmp_adev);
4892 #endif
4893                                 if (vram_lost) {
4894                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
4895                                         amdgpu_inc_vram_lost(tmp_adev);
4896                                 }
4897
4898                                 r = amdgpu_device_fw_loading(tmp_adev);
4899                                 if (r)
4900                                         return r;
4901
4902                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4903                                 if (r)
4904                                         goto out;
4905
4906                                 if (vram_lost)
4907                                         amdgpu_device_fill_reset_magic(tmp_adev);
4908
4909                                 /*
4910                                  * Add this ASIC back as tracked since the reset
4911                                  * has already completed successfully.
4912                                  */
4913                                 amdgpu_register_gpu_instance(tmp_adev);
4914
4915                                 if (!reset_context->hive &&
4916                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4917                                         amdgpu_xgmi_add_device(tmp_adev);
4918
4919                                 r = amdgpu_device_ip_late_init(tmp_adev);
4920                                 if (r)
4921                                         goto out;
4922
4923                                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4924
4925                                 /*
4926                                  * The GPU enters a bad state once the number of
4927                                  * faulty pages flagged by ECC reaches the
4928                                  * threshold, and RAS recovery is scheduled next.
4929                                  * So add a check here to break recovery if the
4930                                  * bad page threshold is indeed exceeded, and
4931                                  * remind the user to retire this GPU or set a
4932                                  * bigger bad_page_threshold value to fix this
4933                                  * the next time the driver is probed.
4934                                  */
4935                                 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4936                                         /* must succeed. */
4937                                         amdgpu_ras_resume(tmp_adev);
4938                                 } else {
4939                                         r = -EINVAL;
4940                                         goto out;
4941                                 }
4942
4943                                 /* Update PSP FW topology after reset */
4944                                 if (reset_context->hive &&
4945                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4946                                         r = amdgpu_xgmi_update_topology(
4947                                                 reset_context->hive, tmp_adev);
4948                         }
4949                 }
4950
4951 out:
4952                 if (!r) {
4953                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4954                         r = amdgpu_ib_ring_tests(tmp_adev);
4955                         if (r) {
4956                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4957                                 need_full_reset = true;
4958                                 r = -EAGAIN;
4959                                 goto end;
4960                         }
4961                 }
4962
4963                 if (!r)
4964                         r = amdgpu_device_recover_vram(tmp_adev);
4965                 else
4966                         tmp_adev->asic_reset_res = r;
4967         }
4968
4969 end:
4970         if (need_full_reset)
4971                 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4972         else
4973                 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4974         return r;
4975 }
4976
4977 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
4978 {
4979
4980         switch (amdgpu_asic_reset_method(adev)) {
4981         case AMD_RESET_METHOD_MODE1:
4982                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4983                 break;
4984         case AMD_RESET_METHOD_MODE2:
4985                 adev->mp1_state = PP_MP1_STATE_RESET;
4986                 break;
4987         default:
4988                 adev->mp1_state = PP_MP1_STATE_NONE;
4989                 break;
4990         }
4991 }
4992
4993 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
4994 {
4995         amdgpu_vf_error_trans_all(adev);
4996         adev->mp1_state = PP_MP1_STATE_NONE;
4997 }
4998
4999 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5000 {
5001         struct pci_dev *p = NULL;
5002
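        /* function 1 at the GPU's own domain/bus/slot is its audio function */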
5003         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5004                         adev->pdev->bus->number, 1);
5005         if (p) {
5006                 pm_runtime_enable(&(p->dev));
5007                 pm_runtime_resume(&(p->dev));
5008         }
5009 }
5010
5011 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5012 {
5013         enum amd_reset_method reset_method;
5014         struct pci_dev *p = NULL;
5015         u64 expires;
5016
5017         /*
5018          * For now, only BACO and mode1 reset are confirmed
5019          * to suffer from the audio issue if not properly suspended.
5020          */
5021         reset_method = amdgpu_asic_reset_method(adev);
5022         if ((reset_method != AMD_RESET_METHOD_BACO) &&
5023              (reset_method != AMD_RESET_METHOD_MODE1))
5024                 return -EINVAL;
5025
5026         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5027                         adev->pdev->bus->number, 1);
5028         if (!p)
5029                 return -ENODEV;
5030
5031         expires = pm_runtime_autosuspend_expiration(&(p->dev));
5032         if (!expires)
5033                 /*
5034                  * If we cannot get the audio device autosuspend delay,
5035                  * a fixed 4S interval will be used. Since 3S is the
5036                  * audio controller's default autosuspend delay, the 4S
5037                  * used here is guaranteed to cover it.
5038                  */
5039                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5040
5041         while (!pm_runtime_status_suspended(&(p->dev))) {
5042                 if (!pm_runtime_suspend(&(p->dev)))
5043                         break;
5044
5045                 if (expires < ktime_get_mono_fast_ns()) {
5046                         dev_warn(adev->dev, "failed to suspend display audio\n");
5047                         /* TODO: abort the succeeding gpu reset? */
5048                         return -ETIMEDOUT;
5049                 }
5050         }
5051
5052         pm_runtime_disable(&(p->dev));
5053
5054         return 0;
5055 }
5056
5057 static void amdgpu_device_recheck_guilty_jobs(
5058         struct amdgpu_device *adev, struct list_head *device_list_handle,
5059         struct amdgpu_reset_context *reset_context)
5060 {
5061         int i, r = 0;
5062
5063         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5064                 struct amdgpu_ring *ring = adev->rings[i];
5065                 int ret = 0;
5066                 struct drm_sched_job *s_job;
5067
5068                 if (!ring || !ring->sched.thread)
5069                         continue;
5070
5071                 s_job = list_first_entry_or_null(&ring->sched.pending_list,
5072                                 struct drm_sched_job, list);
5073                 if (s_job == NULL)
5074                         continue;
5075
5076                 /* clear the job's guilty status and depend on the following step to decide the real one */
5077                 drm_sched_reset_karma(s_job);
5078                 drm_sched_resubmit_jobs_ext(&ring->sched, 1);
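                /* resubmits only the single oldest pending job, the one under recheck */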
5079
5080                 if (!s_job->s_fence->parent) {
5081                         DRM_WARN("Failed to get a HW fence for job!");
5082                         continue;
5083                 }
5084
5085                 ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
5086                 if (ret == 0) { /* timeout */
5087                         DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
5088                                                 ring->sched.name, s_job->id);
5089
5091                         amdgpu_fence_driver_isr_toggle(adev, true);
5092
5093                         /* Clear this failed job from fence array */
5094                         amdgpu_fence_driver_clear_job_fences(ring);
5095
5096                         amdgpu_fence_driver_isr_toggle(adev, false);
5097
5098                         /* Since the job won't signal and we go for
5099                          * another resubmit, drop this parent pointer.
5100                          */
5101                         dma_fence_put(s_job->s_fence->parent);
5102                         s_job->s_fence->parent = NULL;
5103
5104                         /* set guilty */
5105                         drm_sched_increase_karma(s_job);
5106                         amdgpu_reset_prepare_hwcontext(adev, reset_context);
5107 retry:
5108                         /* do hw reset */
5109                         if (amdgpu_sriov_vf(adev)) {
5110                                 amdgpu_virt_fini_data_exchange(adev);
5111                                 r = amdgpu_device_reset_sriov(adev, false);
5112                                 if (r)
5113                                         adev->asic_reset_res = r;
5114                         } else {
5115                                 clear_bit(AMDGPU_SKIP_HW_RESET,
5116                                           &reset_context->flags);
5117                                 r = amdgpu_do_asic_reset(device_list_handle,
5118                                                          reset_context);
5119                                 if (r == -EAGAIN)
5120                                         goto retry;
5121                         }
5122
5123                         /*
5124                          * add reset counter so that the following
5125                          * resubmitted job could flush vmid
5126                          */
5127                         atomic_inc(&adev->gpu_reset_counter);
5128                         continue;
5129                 }
5130
5131                 /* got the hw fence, signal finished fence */
5132                 atomic_dec(ring->sched.score);
5133                 dma_fence_get(&s_job->s_fence->finished);
5134                 dma_fence_signal(&s_job->s_fence->finished);
5135                 dma_fence_put(&s_job->s_fence->finished);
5136
5137                 /* remove node from list and free the job */
5138                 spin_lock(&ring->sched.job_list_lock);
5139                 list_del_init(&s_job->list);
5140                 spin_unlock(&ring->sched.job_list_lock);
5141                 ring->sched.ops->free_job(s_job);
5142         }
5143 }
5144
5145 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5146 {
5147         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5148
5149 #if defined(CONFIG_DEBUG_FS)
5150         if (!amdgpu_sriov_vf(adev))
5151                 cancel_work(&adev->reset_work);
5152 #endif
5153
5154         if (adev->kfd.dev)
5155                 cancel_work(&adev->kfd.reset_work);
5156
5157         if (amdgpu_sriov_vf(adev))
5158                 cancel_work(&adev->virt.flr_work);
5159
5160         if (con && adev->ras_enabled)
5161                 cancel_work(&con->recovery_work);
5162
5163 }
5164
5165
5166 /**
5167  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5168  *
5169  * @adev: amdgpu_device pointer
5170  * @job: which job triggered the hang
5171  *
5172  * Attempt to reset the GPU if it has hung (all asics).
5173  * Attempt to do a soft reset or a full reset and reinitialize the ASIC.
5174  * Returns 0 for success or an error on failure.
5175  */
5177 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5178                               struct amdgpu_job *job,
5179                               struct amdgpu_reset_context *reset_context)
5180 {
5181         struct list_head device_list, *device_list_handle =  NULL;
5182         bool job_signaled = false;
5183         struct amdgpu_hive_info *hive = NULL;
5184         struct amdgpu_device *tmp_adev = NULL;
5185         int i, r = 0;
5186         bool need_emergency_restart = false;
5187         bool audio_suspended = false;
5188         int tmp_vram_lost_counter;
5189         bool gpu_reset_for_dev_remove = false;
5190
5191         gpu_reset_for_dev_remove =
5192                         test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5193                                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5194
5195         /*
5196          * Special case: RAS triggered and full reset isn't supported
5197          */
5198         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5199
5200         /*
5201          * Flush RAM to disk so that after reboot
5202          * the user can read log and see why the system rebooted.
5203          */
5204         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5205                 DRM_WARN("Emergency reboot.");
5206
5207                 ksys_sync_helper();
5208                 emergency_restart();
5209         }
5210
5211         dev_info(adev->dev, "GPU %s begin!\n",
5212                 need_emergency_restart ? "jobs stop" : "reset");
5213
5214         if (!amdgpu_sriov_vf(adev))
5215                 hive = amdgpu_get_xgmi_hive(adev);
5216         if (hive)
5217                 mutex_lock(&hive->hive_lock);
5218
5219         reset_context->job = job;
5220         reset_context->hive = hive;
5221         /*
5222          * Build list of devices to reset.
5223          * In case we are in XGMI hive mode, resort the device list
5224          * to put adev in the 1st position.
5225          */
5226         INIT_LIST_HEAD(&device_list);
5227         if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5228                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5229                         list_add_tail(&tmp_adev->reset_list, &device_list);
5230                         if (gpu_reset_for_dev_remove && adev->shutdown)
5231                                 tmp_adev->shutdown = true;
5232                 }
5233                 if (!list_is_first(&adev->reset_list, &device_list))
5234                         list_rotate_to_front(&adev->reset_list, &device_list);
5235                 device_list_handle = &device_list;
5236         } else {
5237                 list_add_tail(&adev->reset_list, &device_list);
5238                 device_list_handle = &device_list;
5239         }
5240
5241         /* We need to lock reset domain only once both for XGMI and single device */
5242         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5243                                     reset_list);
5244         amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5245
5246         /* block all schedulers and reset given job's ring */
5247         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5248
5249                 amdgpu_device_set_mp1_state(tmp_adev);
5250
5251                 /*
5252                  * Try to put the audio codec into suspend state
5253                  * before the gpu reset starts.
5254                  *
5255                  * The power domain of the graphics device is
5256                  * shared with the AZ power domain. Without this,
5257                  * we may change the audio hardware from behind
5258                  * the audio driver's back. That will trigger
5259                  * some audio codec errors.
5260                  */
5261                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5262                         audio_suspended = true;
5263
5264                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5265
5266                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5267
5268                 if (!amdgpu_sriov_vf(tmp_adev))
5269                         amdgpu_amdkfd_pre_reset(tmp_adev);
5270
5271                 /*
5272                  * Mark these ASICs to be reset as untracked first,
5273                  * and add them back after the reset has completed.
5274                  */
5275                 amdgpu_unregister_gpu_instance(tmp_adev);
5276
5277                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5278
5279                 /* disable ras on ALL IPs */
5280                 if (!need_emergency_restart &&
5281                       amdgpu_device_ip_need_full_reset(tmp_adev))
5282                         amdgpu_ras_suspend(tmp_adev);
5283
5284                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5285                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5286
5287                         if (!ring || !ring->sched.thread)
5288                                 continue;
5289
5290                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5291
5292                         if (need_emergency_restart)
5293                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5294                 }
5295                 atomic_inc(&tmp_adev->gpu_reset_counter);
5296         }
5297
5298         if (need_emergency_restart)
5299                 goto skip_sched_resume;
5300
5301         /*
5302          * Must check guilty signal here since after this point all old
5303          * HW fences are force signaled.
5304          *
5305          * job->base holds a reference to parent fence
5306          */
5307         if (job && dma_fence_is_signaled(&job->hw_fence)) {
5308                 job_signaled = true;
5309                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5310                 goto skip_hw_reset;
5311         }
5312
5313 retry:  /* Rest of adevs pre asic reset from XGMI hive. */
5314         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5315                 if (gpu_reset_for_dev_remove) {
5316                         /* Workaround for ASICs that need to disable SMC first */
5317                         amdgpu_device_smu_fini_early(tmp_adev);
5318                 }
5319                 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5320                 /* TODO: Should we stop? */
5321                 if (r) {
5322                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5323                                   r, adev_to_drm(tmp_adev)->unique);
5324                         tmp_adev->asic_reset_res = r;
5325                 }
5326
5327                 /*
5328                  * Drop all pending non-scheduler resets. Scheduler resets
5329                  * were already dropped during drm_sched_stop.
5330                  */
5331                 amdgpu_device_stop_pending_resets(tmp_adev);
5332         }
5333
5334         tmp_vram_lost_counter = atomic_read(&adev->vram_lost_counter);
5335         /* Actual ASIC resets if needed.*/
5336         /* Host driver will handle XGMI hive reset for SRIOV */
5337         if (amdgpu_sriov_vf(adev)) {
5338                 r = amdgpu_device_reset_sriov(adev, !job);
5339                 if (r)
5340                         adev->asic_reset_res = r;
5341
5342                 /* Aldebaran supports RAS in SRIOV, so RAS needs to be resumed during reset */
5343                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
5344                         amdgpu_ras_resume(adev);
5345         } else {
5346                 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5347                 if (r == -EAGAIN)
5348                         goto retry;
5349
5350                 if (!r && gpu_reset_for_dev_remove)
5351                         goto recover_end;
5352         }
5353
5354 skip_hw_reset:
5355
5356         /* Post ASIC reset for all devs. */
5357         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5358
5359                 /*
5360                  * Sometimes a later bad compute job can block a good gfx job, as the
5361                  * gfx and compute rings share internal GC HW. We add an additional
5362                  * guilty-job recheck step to find the real guilty job: it synchronously
5363                  * resubmits and waits for the first job to signal; if that wait times
5364                  * out, we identify it as the real guilty job.
5365                  */
5366                 if (amdgpu_gpu_recovery == 2 &&
5367                         !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5368                         amdgpu_device_recheck_guilty_jobs(
5369                                 tmp_adev, device_list_handle, reset_context);
5370
5371                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5372                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5373
5374                         if (!ring || !ring->sched.thread)
5375                                 continue;
5376
5377                         /* No point in resubmitting jobs if we didn't HW reset */
5378                         if (!tmp_adev->asic_reset_res && !job_signaled)
5379                                 drm_sched_resubmit_jobs(&ring->sched);
5380
5381                         drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5382                 }
5383
5384                 if (tmp_adev->enable_mes)
5385                         amdgpu_mes_self_test(tmp_adev);
5386
5387                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5388                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5389                 }
5390
5391                 if (tmp_adev->asic_reset_res)
5392                         r = tmp_adev->asic_reset_res;
5393
5394                 tmp_adev->asic_reset_res = 0;
5395
5396                 if (r) {
5397                         /* bad news, how do we tell userspace? */
5398                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5399                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5400                 } else {
5401                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5402                         if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5403                                 DRM_WARN("smart shift update failed\n");
5404                 }
5405         }
5406
5407 skip_sched_resume:
5408         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5409                 /* unlock kfd: SRIOV would do it separately */
5410                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5411                         amdgpu_amdkfd_post_reset(tmp_adev);
5412
5413                 /* kfd_post_reset will do nothing if the kfd device is not initialized,
5414                  * so bring up kfd here if it was not initialized before.
5415                  */
5416                 if (!tmp_adev->kfd.init_complete)
5417                         amdgpu_amdkfd_device_init(tmp_adev);
5418
5419                 if (audio_suspended)
5420                         amdgpu_device_resume_display_audio(tmp_adev);
5421
5422                 amdgpu_device_unset_mp1_state(tmp_adev);
5423         }
5424
5425 recover_end:
5426         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5427                                             reset_list);
5428         amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5429
5430         if (hive) {
5431                 mutex_unlock(&hive->hive_lock);
5432                 amdgpu_put_xgmi_hive(hive);
5433         }
5434
5435         if (r)
5436                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5437
5438         atomic_set(&adev->reset_domain->reset_res, r);
5439         return r;
5440 }
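/*
 * Illustrative sketch, not part of this file: a typical caller, such as a
 * ring timeout handler, fills in an amdgpu_reset_context before calling
 * amdgpu_device_gpu_recover().  The surrounding handler code is assumed;
 * the fields below mirror what this function consumes.
 */
#if 0	/* example only */
	struct amdgpu_reset_context reset_context;
	int r;

	memset(&reset_context, 0, sizeof(reset_context));
	reset_context.method = AMD_RESET_METHOD_NONE;	/* let the ASIC choose */
	reset_context.reset_req_dev = adev;
	reset_context.job = job;			/* the hung job, may be NULL */
	clear_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);

	r = amdgpu_device_gpu_recover(adev, job, &reset_context);
#endif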
5441
5442 /**
5443  * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5444  *
5445  * @adev: amdgpu_device pointer
5446  *
5447  * Fetches and stores in the driver the PCIE capabilities (gen speed
5448  * and lanes) of the slot the device is in. Handles APUs and
5449  * virtualized environments where PCIE config space may not be available.
5450  */
5451 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5452 {
5453         struct pci_dev *pdev;
5454         enum pci_bus_speed speed_cap, platform_speed_cap;
5455         enum pcie_link_width platform_link_width;
5456
5457         if (amdgpu_pcie_gen_cap)
5458                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5459
5460         if (amdgpu_pcie_lane_cap)
5461                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5462
5463         /* covers APUs as well */
5464         if (pci_is_root_bus(adev->pdev->bus)) {
5465                 if (adev->pm.pcie_gen_mask == 0)
5466                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5467                 if (adev->pm.pcie_mlw_mask == 0)
5468                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5469                 return;
5470         }
5471
5472         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5473                 return;
5474
5475         pcie_bandwidth_available(adev->pdev, NULL,
5476                                  &platform_speed_cap, &platform_link_width);
5477
5478         if (adev->pm.pcie_gen_mask == 0) {
5479                 /* asic caps */
5480                 pdev = adev->pdev;
5481                 speed_cap = pcie_get_speed_cap(pdev);
5482                 if (speed_cap == PCI_SPEED_UNKNOWN) {
5483                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5484                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5485                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5486                 } else {
5487                         if (speed_cap == PCIE_SPEED_32_0GT)
5488                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5489                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5490                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5491                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5492                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5493                         else if (speed_cap == PCIE_SPEED_16_0GT)
5494                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5495                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5496                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5497                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5498                         else if (speed_cap == PCIE_SPEED_8_0GT)
5499                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5500                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5501                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5502                         else if (speed_cap == PCIE_SPEED_5_0GT)
5503                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5504                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5505                         else
5506                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5507                 }
5508                 /* platform caps */
5509                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5510                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5511                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5512                 } else {
5513                         if (platform_speed_cap == PCIE_SPEED_32_0GT)
5514                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5515                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5516                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5517                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5518                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5519                         else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5520                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5521                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5522                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5523                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5524                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5525                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5526                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5527                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5528                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5529                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5530                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5531                         else
5532                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5533
5534                 }
5535         }
5536         if (adev->pm.pcie_mlw_mask == 0) {
5537                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5538                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5539                 } else {
5540                         switch (platform_link_width) {
5541                         case PCIE_LNK_X32:
5542                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5543                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5544                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5545                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5546                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5547                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5548                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5549                                 break;
5550                         case PCIE_LNK_X16:
5551                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5552                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5553                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5554                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5555                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5556                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5557                                 break;
5558                         case PCIE_LNK_X12:
5559                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5560                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5561                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5562                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5563                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5564                                 break;
5565                         case PCIE_LNK_X8:
5566                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5567                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5568                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5569                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5570                                 break;
5571                         case PCIE_LNK_X4:
5572                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5573                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5574                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5575                                 break;
5576                         case PCIE_LNK_X2:
5577                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5578                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5579                                 break;
5580                         case PCIE_LNK_X1:
5581                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5582                                 break;
5583                         default:
5584                                 break;
5585                         }
5586                 }
5587         }
5588 }
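
/*
 * Illustrative sketch, not driver code: a consumer of the CAIL gen mask
 * built above might reduce it to the fastest supported PCIe gen like this.
 * The helper name is hypothetical; the CAIL_* flags are the ones used above.
 */
static inline int amdgpu_example_max_pcie_gen(u32 gen_mask)
{
        if (gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
                return 5;
        if (gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
                return 4;
        if (gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
                return 3;
        if (gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
                return 2;
        return 1;
}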
5589
5590 /**
5591  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5592  *
5593  * @adev: amdgpu_device pointer
5594  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5595  *
5596  * Return true if @peer_adev can access (DMA) @adev through the PCIe
5597  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5598  * @peer_adev.
5599  */
5600 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5601                                       struct amdgpu_device *peer_adev)
5602 {
5603 #ifdef CONFIG_HSA_AMD_P2P
5604         uint64_t address_mask = peer_adev->dev->dma_mask ?
5605                 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5606         resource_size_t aper_limit =
5607                 adev->gmc.aper_base + adev->gmc.aper_size - 1;
5608         bool p2p_access =
5609                 !adev->gmc.xgmi.connected_to_cpu &&
5610                 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5611
5612         return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5613                 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5614                 !(adev->gmc.aper_base & address_mask ||
5615                   aper_limit & address_mask));
5616 #else
5617         return false;
5618 #endif
5619 }
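
/*
 * Worked example for the mask check above (illustrative numbers): with a
 * 44-bit peer DMA mask, address_mask = ~((1ULL << 44) - 1). An aperture at
 * aper_base = 0x7_0000_0000 with aper_size = 16 GiB gives
 * aper_limit = 0xa_ffff_ffff; neither end has bits set at or above bit 44,
 * so the whole BAR is reachable through the peer's DMA mask and, given
 * fully visible VRAM and working P2P, the check returns true.
 */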
5620
5621 int amdgpu_device_baco_enter(struct drm_device *dev)
5622 {
5623         struct amdgpu_device *adev = drm_to_adev(dev);
5624         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5625
5626         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5627                 return -ENOTSUPP;
5628
5629         if (ras && adev->ras_enabled &&
5630             adev->nbio.funcs->enable_doorbell_interrupt)
5631                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5632
5633         return amdgpu_dpm_baco_enter(adev);
5634 }
5635
5636 int amdgpu_device_baco_exit(struct drm_device *dev)
5637 {
5638         struct amdgpu_device *adev = drm_to_adev(dev);
5639         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5640         int ret = 0;
5641
5642         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5643                 return -ENOTSUPP;
5644
5645         ret = amdgpu_dpm_baco_exit(adev);
5646         if (ret)
5647                 return ret;
5648
5649         if (ras && adev->ras_enabled &&
5650             adev->nbio.funcs->enable_doorbell_interrupt)
5651                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5652
5653         if (amdgpu_passthrough(adev) &&
5654             adev->nbio.funcs->clear_doorbell_interrupt)
5655                 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5656
5657         return 0;
5658 }
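
/*
 * Hedged usage sketch: BACO ("bus active, chip off") keeps the PCIe bus
 * alive while the chip is powered down, so a runtime-PM style caller can
 * bracket the powered-off window with the two helpers above. The function
 * below is illustrative only, not driver code.
 */
static inline int amdgpu_example_baco_cycle(struct drm_device *dev)
{
        int r = amdgpu_device_baco_enter(dev);

        if (r)
                return r;
        /* ... device now idles in BACO; wake it back up ... */
        return amdgpu_device_baco_exit(dev);
}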
5659
5660 /**
5661  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5662  * @pdev: PCI device struct
5663  * @state: PCI channel state
5664  *
5665  * Description: Called when a PCI error is detected.
5666  *
5667  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5668  */
5669 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5670 {
5671         struct drm_device *dev = pci_get_drvdata(pdev);
5672         struct amdgpu_device *adev = drm_to_adev(dev);
5673         int i;
5674
5675         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5676
5677         if (adev->gmc.xgmi.num_physical_nodes > 1) {
5678                 DRM_WARN("No support for XGMI hive yet...");
5679                 return PCI_ERS_RESULT_DISCONNECT;
5680         }
5681
5682         adev->pci_channel_state = state;
5683
5684         switch (state) {
5685         case pci_channel_io_normal:
5686                 return PCI_ERS_RESULT_CAN_RECOVER;
5687         /* Fatal error, prepare for slot reset */
5688         case pci_channel_io_frozen:
5689                 /*
5690                  * Locking adev->reset_domain->sem will prevent any external access
5691                  * to GPU during PCI error recovery
5692                  */
5693                 amdgpu_device_lock_reset_domain(adev->reset_domain);
5694                 amdgpu_device_set_mp1_state(adev);
5695
5696                 /*
5697                  * Block any work scheduling as we do for regular GPU reset
5698                  * for the duration of the recovery
5699                  */
5700                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5701                         struct amdgpu_ring *ring = adev->rings[i];
5702
5703                         if (!ring || !ring->sched.thread)
5704                                 continue;
5705
5706                         drm_sched_stop(&ring->sched, NULL);
5707                 }
5708                 atomic_inc(&adev->gpu_reset_counter);
5709                 return PCI_ERS_RESULT_NEED_RESET;
5710         case pci_channel_io_perm_failure:
5711                 /* Permanent error, prepare for device removal */
5712                 return PCI_ERS_RESULT_DISCONNECT;
5713         }
5714
5715         return PCI_ERS_RESULT_NEED_RESET;
5716 }
5717
5718 /**
5719  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5720  * @pdev: pointer to PCI device
5721  */
5722 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5723 {
5724
5725         DRM_INFO("PCI error: mmio enabled callback!!\n");
5726
5727         /* TODO - dump whatever for debugging purposes */
5728
5729                 /* This is called only if amdgpu_pci_error_detected returns
5730          * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5731          * works, no need to reset slot.
5732          */
5733
5734         return PCI_ERS_RESULT_RECOVERED;
5735 }
5736
5737 /**
5738  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5739  * @pdev: PCI device struct
5740  *
5741  * Description: This routine is called by the PCI error recovery
5742  * code after the PCI slot has been reset, just before we
5743  * should resume normal operations.
5744  */
5745 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5746 {
5747         struct drm_device *dev = pci_get_drvdata(pdev);
5748         struct amdgpu_device *adev = drm_to_adev(dev);
5749         int r, i;
5750         struct amdgpu_reset_context reset_context;
5751         u32 memsize;
5752         struct list_head device_list;
5753
5754         DRM_INFO("PCI error: slot reset callback!!\n");
5755
5756         memset(&reset_context, 0, sizeof(reset_context));
5757
5758         INIT_LIST_HEAD(&device_list);
5759         list_add_tail(&adev->reset_list, &device_list);
5760
5761         /* wait for asic to come out of reset */
5762         msleep(500);
5763
5764         /* Restore PCI config space */
5765         amdgpu_device_load_pci_state(pdev);
5766
5767         /* confirm ASIC came out of reset */
5768         for (i = 0; i < adev->usec_timeout; i++) {
5769                 memsize = amdgpu_asic_get_config_memsize(adev);
5770
5771                 if (memsize != 0xffffffff)
5772                         break;
5773                 udelay(1);
5774         }
5775         if (memsize == 0xffffffff) {
5776                 r = -ETIME;
5777                 goto out;
5778         }
5779
5780         reset_context.method = AMD_RESET_METHOD_NONE;
5781         reset_context.reset_req_dev = adev;
5782         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5783         set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5784
5785         adev->no_hw_access = true;
5786         r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5787         adev->no_hw_access = false;
5788         if (r)
5789                 goto out;
5790
5791         r = amdgpu_do_asic_reset(&device_list, &reset_context);
5792
5793 out:
5794         if (!r) {
5795                 if (amdgpu_device_cache_pci_state(adev->pdev))
5796                         pci_restore_state(adev->pdev);
5797
5798                 DRM_INFO("PCIe error recovery succeeded\n");
5799         } else {
5800                 DRM_ERROR("PCIe error recovery failed, err:%d\n", r);
5801                 amdgpu_device_unset_mp1_state(adev);
5802                 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5803         }
5804
5805         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5806 }
5807
5808 /**
5809  * amdgpu_pci_resume() - resume normal ops after PCI reset
5810  * @pdev: pointer to PCI device
5811  *
5812  * Called when the error recovery driver tells us that it's
5813  * OK to resume normal operation.
5814  */
5815 void amdgpu_pci_resume(struct pci_dev *pdev)
5816 {
5817         struct drm_device *dev = pci_get_drvdata(pdev);
5818         struct amdgpu_device *adev = drm_to_adev(dev);
5819         int i;
5820
5821
5822         DRM_INFO("PCI error: resume callback!!\n");
5823
5824         /* Only continue execution for the case of pci_channel_io_frozen */
5825         if (adev->pci_channel_state != pci_channel_io_frozen)
5826                 return;
5827
5828         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5829                 struct amdgpu_ring *ring = adev->rings[i];
5830
5831                 if (!ring || !ring->sched.thread)
5832                         continue;
5833
5834
5835                 drm_sched_resubmit_jobs(&ring->sched);
5836                 drm_sched_start(&ring->sched, true);
5837         }
5838
5839         amdgpu_device_unset_mp1_state(adev);
5840         amdgpu_device_unlock_reset_domain(adev->reset_domain);
5841 }
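
/*
 * The four callbacks above implement the kernel's PCI error recovery
 * state machine (error_detected -> mmio_enabled or slot_reset -> resume)
 * and are wired up through a struct pci_error_handlers in the driver's
 * struct pci_driver, roughly as below (see amdgpu_drv.c for the real
 * registration; this instance is illustrative).
 */
static const struct pci_error_handlers amdgpu_example_pci_err_handler = {
        .error_detected = amdgpu_pci_error_detected,
        .mmio_enabled   = amdgpu_pci_mmio_enabled,
        .slot_reset     = amdgpu_pci_slot_reset,
        .resume         = amdgpu_pci_resume,
};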
5842
5843 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5844 {
5845         struct drm_device *dev = pci_get_drvdata(pdev);
5846         struct amdgpu_device *adev = drm_to_adev(dev);
5847         int r;
5848
5849         r = pci_save_state(pdev);
5850         if (!r) {
5851                 kfree(adev->pci_state);
5852
5853                 adev->pci_state = pci_store_saved_state(pdev);
5854
5855                 if (!adev->pci_state) {
5856                         DRM_ERROR("Failed to store PCI saved state\n");
5857                         return false;
5858                 }
5859         } else {
5860                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5861                 return false;
5862         }
5863
5864         return true;
5865 }
5866
5867 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5868 {
5869         struct drm_device *dev = pci_get_drvdata(pdev);
5870         struct amdgpu_device *adev = drm_to_adev(dev);
5871         int r;
5872
5873         if (!adev->pci_state)
5874                 return false;
5875
5876         r = pci_load_saved_state(pdev, adev->pci_state);
5877
5878         if (!r) {
5879                 pci_restore_state(pdev);
5880         } else {
5881                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5882                 return false;
5883         }
5884
5885         return true;
5886 }
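
/*
 * Hedged sketch of the intended pairing: cache the config space while the
 * device is healthy, then replay it after a reset. Both helpers return
 * false on failure so a caller can fall back to a full re-init; compare
 * the slot_reset path above. Illustrative only, not driver code.
 */
static inline bool amdgpu_example_pci_state_roundtrip(struct amdgpu_device *adev)
{
        /* Before a reset, snapshot the healthy config space ... */
        if (!amdgpu_device_cache_pci_state(adev->pdev))
                return false;
        /* ... the reset happens here; then replay the snapshot. */
        return amdgpu_device_load_pci_state(adev->pdev);
}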
5887
5888 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5889                 struct amdgpu_ring *ring)
5890 {
5891 #ifdef CONFIG_X86_64
5892         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5893                 return;
5894 #endif
5895         if (adev->gmc.xgmi.connected_to_cpu)
5896                 return;
5897
5898         if (ring && ring->funcs->emit_hdp_flush)
5899                 amdgpu_ring_emit_hdp_flush(ring);
5900         else
5901                 amdgpu_asic_flush_hdp(adev, ring);
5902 }
5903
5904 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5905                 struct amdgpu_ring *ring)
5906 {
5907 #ifdef CONFIG_X86_64
5908         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5909                 return;
5910 #endif
5911         if (adev->gmc.xgmi.connected_to_cpu)
5912                 return;
5913
5914         amdgpu_asic_invalidate_hdp(adev, ring);
5915 }
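
/*
 * Hedged ordering sketch: flush the HDP cache after CPU writes so the GPU
 * sees the data, and invalidate it before CPU reads of GPU-written VRAM.
 * Passing a NULL ring falls back to the MMIO path. Illustrative only.
 */
static inline void amdgpu_example_publish_cpu_writes(struct amdgpu_device *adev)
{
        /* ... CPU writes to a buffer in CPU-visible VRAM ... */
        amdgpu_device_flush_hdp(adev, NULL);
        /* GPU work consuming the buffer may be submitted now. */
}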
5916
5917 int amdgpu_in_reset(struct amdgpu_device *adev)
5918 {
5919         return atomic_read(&adev->reset_domain->in_gpu_reset);
5920 }
5921
5922 /**
5923  * amdgpu_device_halt() - bring hardware to some kind of halt state
5924  *
5925  * @adev: amdgpu_device pointer
5926  *
5927  * Bring hardware to some kind of halt state so that no one can touch it
5928  * any more. This helps to maintain the error context when an error occurs.
5929  * Compared to a simple hang, the system will stay stable at least for SSH
5930  * access. Then it should be trivial to inspect the hardware state and
5931  * see what's going on. Implemented as follows:
5932  *
5933  * 1. drm_dev_unplug() makes device inaccessible to user space (IOCTLs, etc.),
5934  *    clears all CPU mappings to device, disallows remappings through page faults
5935  * 2. amdgpu_irq_disable_all() disables all interrupts
5936  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5937  * 4. set adev->no_hw_access to avoid potential crashes after step 5
5938  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5939  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5940  *    flush any in-flight DMA operations
5941  */
5942 void amdgpu_device_halt(struct amdgpu_device *adev)
5943 {
5944         struct pci_dev *pdev = adev->pdev;
5945         struct drm_device *ddev = adev_to_drm(adev);
5946
5947         drm_dev_unplug(ddev);
5948
5949         amdgpu_irq_disable_all(adev);
5950
5951         amdgpu_fence_driver_hw_fini(adev);
5952
5953         adev->no_hw_access = true;
5954
5955         amdgpu_device_unmap_mmio(adev);
5956
5957         pci_disable_device(pdev);
5958         pci_wait_for_pending_transaction(pdev);
5959 }
5960
5961 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5962                                 u32 reg)
5963 {
5964         unsigned long flags, address, data;
5965         u32 r;
5966
5967         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5968         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5969
5970         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5971         WREG32(address, reg * 4);
5972         (void)RREG32(address);
5973         r = RREG32(data);
5974         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5975         return r;
5976 }
5977
5978 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
5979                                 u32 reg, u32 v)
5980 {
5981         unsigned long flags, address, data;
5982
5983         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5984         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5985
5986         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5987         WREG32(address, reg * 4);
5988         (void)RREG32(address);
5989         WREG32(data, v);
5990         (void)RREG32(data);
5991         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5992 }
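
/*
 * Hedged usage sketch of the index/data idiom above: the port register is
 * selected by writing its offset to the index register, the dummy RREG32()
 * posts each write, and the data register carries the payload. A
 * read-modify-write built on the two accessors might look like this
 * (illustrative only).
 */
static inline void amdgpu_example_pcie_port_rmw(struct amdgpu_device *adev,
                                                u32 reg, u32 clr, u32 set)
{
        u32 v = amdgpu_device_pcie_port_rreg(adev, reg);

        amdgpu_device_pcie_port_wreg(adev, reg, (v & ~clr) | set);
}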
5993
5994 /**
5995  * amdgpu_device_switch_gang - switch to a new gang
5996  * @adev: amdgpu_device pointer
5997  * @gang: the gang to switch to
5998  *
5999  * Try to switch to a new gang.
6000  * Returns: NULL if we switched to the new gang or a reference to the current
6001  * gang leader.
6002  */
6003 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6004                                             struct dma_fence *gang)
6005 {
6006         struct dma_fence *old = NULL;
6007
6008         do {
6009                 dma_fence_put(old);
6010                 rcu_read_lock();
6011                 old = dma_fence_get_rcu_safe(&adev->gang_submit);
6012                 rcu_read_unlock();
6013
6014                 if (old == gang)
6015                         break;
6016
6017                 if (!dma_fence_is_signaled(old))
6018                         return old;
6019
6020         } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6021                          old, gang) != old);
6022
6023         dma_fence_put(old);
6024         return NULL;
6025 }
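
/*
 * Hedged usage sketch: a submitter that must wait for the previous gang
 * would loop on the helper above, e.g.:
 *
 *      struct dma_fence *old;
 *
 *      while ((old = amdgpu_device_switch_gang(adev, new_gang))) {
 *              dma_fence_wait(old, false);
 *              dma_fence_put(old);
 *      }
 *
 * The cmpxchg() retry is needed because another thread can install a
 * different gang between the RCU-safe read and the swap.
 */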