drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
35 #include <linux/devcoredump.h>
36 #include <generated/utsrelease.h>
37 #include <linux/pci-p2pdma.h>
38
39 #include <drm/drm_atomic_helper.h>
40 #include <drm/drm_probe_helper.h>
41 #include <drm/amdgpu_drm.h>
42 #include <linux/vgaarb.h>
43 #include <linux/vga_switcheroo.h>
44 #include <linux/efi.h>
45 #include "amdgpu.h"
46 #include "amdgpu_trace.h"
47 #include "amdgpu_i2c.h"
48 #include "atom.h"
49 #include "amdgpu_atombios.h"
50 #include "amdgpu_atomfirmware.h"
51 #include "amd_pcie.h"
52 #ifdef CONFIG_DRM_AMDGPU_SI
53 #include "si.h"
54 #endif
55 #ifdef CONFIG_DRM_AMDGPU_CIK
56 #include "cik.h"
57 #endif
58 #include "vi.h"
59 #include "soc15.h"
60 #include "nv.h"
61 #include "bif/bif_4_1_d.h"
62 #include <linux/firmware.h>
63 #include "amdgpu_vf_error.h"
64
65 #include "amdgpu_amdkfd.h"
66 #include "amdgpu_pm.h"
67
68 #include "amdgpu_xgmi.h"
69 #include "amdgpu_ras.h"
70 #include "amdgpu_pmu.h"
71 #include "amdgpu_fru_eeprom.h"
72 #include "amdgpu_reset.h"
73
74 #include <linux/suspend.h>
75 #include <drm/task_barrier.h>
76 #include <linux/pm_runtime.h>
77
78 #include <drm/drm_drv.h>
79
80 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
87
88 #define AMDGPU_RESUME_MS                2000
89 #define AMDGPU_MAX_RETRY_LIMIT          2
90 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
91
92 const char *amdgpu_asic_name[] = {
93         "TAHITI",
94         "PITCAIRN",
95         "VERDE",
96         "OLAND",
97         "HAINAN",
98         "BONAIRE",
99         "KAVERI",
100         "KABINI",
101         "HAWAII",
102         "MULLINS",
103         "TOPAZ",
104         "TONGA",
105         "FIJI",
106         "CARRIZO",
107         "STONEY",
108         "POLARIS10",
109         "POLARIS11",
110         "POLARIS12",
111         "VEGAM",
112         "VEGA10",
113         "VEGA12",
114         "VEGA20",
115         "RAVEN",
116         "ARCTURUS",
117         "RENOIR",
118         "ALDEBARAN",
119         "NAVI10",
120         "CYAN_SKILLFISH",
121         "NAVI14",
122         "NAVI12",
123         "SIENNA_CICHLID",
124         "NAVY_FLOUNDER",
125         "VANGOGH",
126         "DIMGREY_CAVEFISH",
127         "BEIGE_GOBY",
128         "YELLOW_CARP",
129         "IP DISCOVERY",
130         "LAST",
131 };
132
133 /**
134  * DOC: pcie_replay_count
135  *
136  * The amdgpu driver provides a sysfs API for reporting the total number
137  * of PCIe replays (NAKs).
138  * The file pcie_replay_count is used for this and returns the total
139  * number of replays as a sum of the NAKs generated and NAKs received
140  */
141
142 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
143                 struct device_attribute *attr, char *buf)
144 {
145         struct drm_device *ddev = dev_get_drvdata(dev);
146         struct amdgpu_device *adev = drm_to_adev(ddev);
147         uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
148
149         return sysfs_emit(buf, "%llu\n", cnt);
150 }
151
152 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
153                 amdgpu_device_get_pcie_replay_count, NULL);
154
155 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
156
157 /**
158  * DOC: product_name
159  *
160  * The amdgpu driver provides a sysfs API for reporting the product name
161  * for the device.
162  * The file product_name is used for this and returns the product name
163  * as returned from the FRU.
164  * NOTE: This is only available for certain server cards
165  */
166
167 static ssize_t amdgpu_device_get_product_name(struct device *dev,
168                 struct device_attribute *attr, char *buf)
169 {
170         struct drm_device *ddev = dev_get_drvdata(dev);
171         struct amdgpu_device *adev = drm_to_adev(ddev);
172
173         return sysfs_emit(buf, "%s\n", adev->product_name);
174 }
175
176 static DEVICE_ATTR(product_name, S_IRUGO,
177                 amdgpu_device_get_product_name, NULL);
178
179 /**
180  * DOC: product_number
181  *
182  * The amdgpu driver provides a sysfs API for reporting the part number
183  * for the device.
184  * The file product_number is used for this and returns the part number
185  * as returned from the FRU.
186  * NOTE: This is only available for certain server cards
187  */
188
189 static ssize_t amdgpu_device_get_product_number(struct device *dev,
190                 struct device_attribute *attr, char *buf)
191 {
192         struct drm_device *ddev = dev_get_drvdata(dev);
193         struct amdgpu_device *adev = drm_to_adev(ddev);
194
195         return sysfs_emit(buf, "%s\n", adev->product_number);
196 }
197
198 static DEVICE_ATTR(product_number, S_IRUGO,
199                 amdgpu_device_get_product_number, NULL);
200
201 /**
202  * DOC: serial_number
203  *
204  * The amdgpu driver provides a sysfs API for reporting the serial number
205  * for the device.
206  * The file serial_number is used for this and returns the serial number
207  * as returned from the FRU.
208  * NOTE: This is only available for certain server cards
209  */
210
211 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
212                 struct device_attribute *attr, char *buf)
213 {
214         struct drm_device *ddev = dev_get_drvdata(dev);
215         struct amdgpu_device *adev = drm_to_adev(ddev);
216
217         return sysfs_emit(buf, "%s\n", adev->serial);
218 }
219
220 static DEVICE_ATTR(serial_number, S_IRUGO,
221                 amdgpu_device_get_serial_number, NULL);
222
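/*
 * Illustrative userspace sketch (not part of this driver): the attributes
 * above appear in the device's sysfs directory, e.g.
 * /sys/class/drm/card0/device/serial_number (pcie_replay_count works the
 * same way).  The card index and the presence of the FRU-backed files are
 * assumptions; they depend on the system and on the board carrying a FRU
 * EEPROM.
 *
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *           char buf[128];
 *           FILE *f = fopen("/sys/class/drm/card0/device/serial_number", "r");
 *
 *           if (f && fgets(buf, sizeof(buf), f))
 *                   printf("serial: %s", buf);
 *           if (f)
 *                   fclose(f);
 *           return 0;
 *   }
 */
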
223 /**
224  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
225  *
226  * @dev: drm_device pointer
227  *
228  * Returns true if the device is a dGPU with ATPX power control,
229  * otherwise returns false.
230  */
231 bool amdgpu_device_supports_px(struct drm_device *dev)
232 {
233         struct amdgpu_device *adev = drm_to_adev(dev);
234
235         if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
236                 return true;
237         return false;
238 }
239
240 /**
241  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
242  *
243  * @dev: drm_device pointer
244  *
245  * Returns true if the device is a dGPU with ACPI power control,
246  * otherwise returns false.
247  */
248 bool amdgpu_device_supports_boco(struct drm_device *dev)
249 {
250         struct amdgpu_device *adev = drm_to_adev(dev);
251
252         if (adev->has_pr3 ||
253             ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
254                 return true;
255         return false;
256 }
257
258 /**
259  * amdgpu_device_supports_baco - Does the device support BACO
260  *
261  * @dev: drm_device pointer
262  *
263  * Returns true if the device supports BACO,
264  * otherwise returns false.
265  */
266 bool amdgpu_device_supports_baco(struct drm_device *dev)
267 {
268         struct amdgpu_device *adev = drm_to_adev(dev);
269
270         return amdgpu_asic_supports_baco(adev);
271 }
272
273 /**
274  * amdgpu_device_supports_smart_shift - Is the device a dGPU with
275  * smart shift support
276  *
277  * @dev: drm_device pointer
278  *
279  * Returns true if the device is a dGPU with Smart Shift support,
280  * otherwise returns false.
281  */
282 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
283 {
284         return (amdgpu_device_supports_boco(dev) &&
285                 amdgpu_acpi_is_power_shift_control_supported());
286 }
287
288 /*
289  * VRAM access helper functions
290  */
291
292 /**
293  * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
294  *
295  * @adev: amdgpu_device pointer
296  * @pos: offset of the buffer in vram
297  * @buf: virtual address of the buffer in system memory
298  * @size: read/write size; the buffer at @buf must be at least @size bytes
299  * @write: true - write to vram, otherwise - read from vram
300  */
301 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
302                              void *buf, size_t size, bool write)
303 {
304         unsigned long flags;
305         uint32_t hi = ~0, tmp = 0;
306         uint32_t *data = buf;
307         uint64_t last;
308         int idx;
309
310         if (!drm_dev_enter(adev_to_drm(adev), &idx))
311                 return;
312
313         BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
314
315         spin_lock_irqsave(&adev->mmio_idx_lock, flags);
316         for (last = pos + size; pos < last; pos += 4) {
317                 tmp = pos >> 31;
318
319                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
320                 if (tmp != hi) {
321                         WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
322                         hi = tmp;
323                 }
324                 if (write)
325                         WREG32_NO_KIQ(mmMM_DATA, *data++);
326                 else
327                         *data++ = RREG32_NO_KIQ(mmMM_DATA);
328         }
329
330         spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
331         drm_dev_exit(idx);
332 }
333
334 /**
335  * amdgpu_device_aper_access - access vram by vram aperture
336  *
337  * @adev: amdgpu_device pointer
338  * @pos: offset of the buffer in vram
339  * @buf: virtual address of the buffer in system memory
340  * @size: read/write size; the buffer at @buf must be at least @size bytes
341  * @write: true - write to vram, otherwise - read from vram
342  *
343  * Returns the number of bytes transferred.
344  */
345 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
346                                  void *buf, size_t size, bool write)
347 {
348 #ifdef CONFIG_64BIT
349         void __iomem *addr;
350         size_t count = 0;
351         uint64_t last;
352
353         if (!adev->mman.aper_base_kaddr)
354                 return 0;
355
356         last = min(pos + size, adev->gmc.visible_vram_size);
357         if (last > pos) {
358                 addr = adev->mman.aper_base_kaddr + pos;
359                 count = last - pos;
360
361                 if (write) {
362                         memcpy_toio(addr, buf, count);
363                         mb();
364                         amdgpu_device_flush_hdp(adev, NULL);
365                 } else {
366                         amdgpu_device_invalidate_hdp(adev, NULL);
367                         mb();
368                         memcpy_fromio(buf, addr, count);
369                 }
370
371         }
372
373         return count;
374 #else
375         return 0;
376 #endif
377 }
378
379 /**
380  * amdgpu_device_vram_access - read/write a buffer in vram
381  *
382  * @adev: amdgpu_device pointer
383  * @pos: offset of the buffer in vram
384  * @buf: virtual address of the buffer in system memory
385  * @size: read/write size; the buffer at @buf must be at least @size bytes
386  * @write: true - write to vram, otherwise - read from vram
387  */
388 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
389                                void *buf, size_t size, bool write)
390 {
391         size_t count;
392
393         /* try using the vram aperture to access vram first */
394         count = amdgpu_device_aper_access(adev, pos, buf, size, write);
395         size -= count;
396         if (size) {
397                 /* use MM to access the rest of vram */
398                 pos += count;
399                 buf += count;
400                 amdgpu_device_mm_access(adev, pos, buf, size, write);
401         }
402 }
403
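/*
 * Usage sketch (illustrative only): reading one dword back from VRAM with the
 * helper above.  The offset is a placeholder; real callers pass a 4-byte
 * aligned position and a buffer at least @size bytes long.
 *
 *   uint32_t value;
 *
 *   amdgpu_device_vram_access(adev, 0x1000, &value, sizeof(value), false);
 *   dev_info(adev->dev, "VRAM dword at 0x1000: 0x%08x\n", value);
 */
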
404 /*
405  * register access helper functions.
406  */
407
408 /* Check if hw access should be skipped because of hotplug or device error */
409 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
410 {
411         if (adev->no_hw_access)
412                 return true;
413
414 #ifdef CONFIG_LOCKDEP
415         /*
416          * This is a bit complicated to understand, so worth a comment. What we assert
417          * here is that the GPU reset is not running on another thread in parallel.
418          *
419          * For this we trylock the read side of the reset semaphore, if that succeeds
420  * we know that the reset is not running in parallel.
421          *
422          * If the trylock fails we assert that we are either already holding the read
423          * side of the lock or are the reset thread itself and hold the write side of
424          * the lock.
425          */
426         if (in_task()) {
427                 if (down_read_trylock(&adev->reset_domain->sem))
428                         up_read(&adev->reset_domain->sem);
429                 else
430                         lockdep_assert_held(&adev->reset_domain->sem);
431         }
432 #endif
433         return false;
434 }
435
436 /**
437  * amdgpu_device_rreg - read a memory mapped IO or indirect register
438  *
439  * @adev: amdgpu_device pointer
440  * @reg: dword aligned register offset
441  * @acc_flags: access flags which require special behavior
442  *
443  * Returns the 32 bit value from the offset specified.
444  */
445 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
446                             uint32_t reg, uint32_t acc_flags)
447 {
448         uint32_t ret;
449
450         if (amdgpu_device_skip_hw_access(adev))
451                 return 0;
452
453         if ((reg * 4) < adev->rmmio_size) {
454                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
455                     amdgpu_sriov_runtime(adev) &&
456                     down_read_trylock(&adev->reset_domain->sem)) {
457                         ret = amdgpu_kiq_rreg(adev, reg);
458                         up_read(&adev->reset_domain->sem);
459                 } else {
460                         ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
461                 }
462         } else {
463                 ret = adev->pcie_rreg(adev, reg * 4);
464         }
465
466         trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
467
468         return ret;
469 }
470
471 /*
472  * MMIO register byte read helper
473  * @offset: byte offset from MMIO start
474  *
475  */
476
477 /**
478  * amdgpu_mm_rreg8 - read a memory mapped IO register
479  *
480  * @adev: amdgpu_device pointer
481  * @offset: byte aligned register offset
482  *
483  * Returns the 8 bit value from the offset specified.
484  */
485 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
486 {
487         if (amdgpu_device_skip_hw_access(adev))
488                 return 0;
489
490         if (offset < adev->rmmio_size)
491                 return (readb(adev->rmmio + offset));
492         BUG();
493 }
494
495 /*
496  * MMIO register byte write helper
497  * @offset: byte offset from MMIO start
498  * @value: value to write to the register
499  *
500  */
501 /**
502  * amdgpu_mm_wreg8 - write a memory mapped IO register
503  *
504  * @adev: amdgpu_device pointer
505  * @offset: byte aligned register offset
506  * @value: 8 bit value to write
507  *
508  * Writes the value specified to the offset specified.
509  */
510 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
511 {
512         if (amdgpu_device_skip_hw_access(adev))
513                 return;
514
515         if (offset < adev->rmmio_size)
516                 writeb(value, adev->rmmio + offset);
517         else
518                 BUG();
519 }
520
521 /**
522  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
523  *
524  * @adev: amdgpu_device pointer
525  * @reg: dword aligned register offset
526  * @v: 32 bit value to write to the register
527  * @acc_flags: access flags which require special behavior
528  *
529  * Writes the value specified to the offset specified.
530  */
531 void amdgpu_device_wreg(struct amdgpu_device *adev,
532                         uint32_t reg, uint32_t v,
533                         uint32_t acc_flags)
534 {
535         if (amdgpu_device_skip_hw_access(adev))
536                 return;
537
538         if ((reg * 4) < adev->rmmio_size) {
539                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
540                     amdgpu_sriov_runtime(adev) &&
541                     down_read_trylock(&adev->reset_domain->sem)) {
542                         amdgpu_kiq_wreg(adev, reg, v);
543                         up_read(&adev->reset_domain->sem);
544                 } else {
545                         writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
546                 }
547         } else {
548                 adev->pcie_wreg(adev, reg * 4, v);
549         }
550
551         trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
552 }
553
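/*
 * Within the driver these helpers are normally reached through the RREG32()/
 * WREG32() macro family (see amdgpu.h), as used by the register sequence
 * programming further below.  A minimal read-modify-write sketch, with
 * placeholder offset and masks:
 *
 *   uint32_t tmp;
 *
 *   tmp = RREG32(reg_offset);
 *   tmp &= ~field_mask;
 *   tmp |= field_value;
 *   WREG32(reg_offset, tmp);
 */
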
554 /**
555  * amdgpu_mm_wreg_mmio_rlc - write a register either through direct/indirect MMIO or through the RLC path if it is in range
556  *
557  * @adev: amdgpu_device pointer
558  * @reg: mmio/rlc register
559  * @v: value to write
560  *
561  * This function is only invoked for debugfs register access.
562  */
563 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
564                              uint32_t reg, uint32_t v)
565 {
566         if (amdgpu_device_skip_hw_access(adev))
567                 return;
568
569         if (amdgpu_sriov_fullaccess(adev) &&
570             adev->gfx.rlc.funcs &&
571             adev->gfx.rlc.funcs->is_rlcg_access_range) {
572                 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
573                         return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
574         } else if ((reg * 4) >= adev->rmmio_size) {
575                 adev->pcie_wreg(adev, reg * 4, v);
576         } else {
577                 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
578         }
579 }
580
581 /**
582  * amdgpu_mm_rdoorbell - read a doorbell dword
583  *
584  * @adev: amdgpu_device pointer
585  * @index: doorbell index
586  *
587  * Returns the value in the doorbell aperture at the
588  * requested doorbell index (CIK).
589  */
590 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
591 {
592         if (amdgpu_device_skip_hw_access(adev))
593                 return 0;
594
595         if (index < adev->doorbell.num_doorbells) {
596                 return readl(adev->doorbell.ptr + index);
597         } else {
598                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
599                 return 0;
600         }
601 }
602
603 /**
604  * amdgpu_mm_wdoorbell - write a doorbell dword
605  *
606  * @adev: amdgpu_device pointer
607  * @index: doorbell index
608  * @v: value to write
609  *
610  * Writes @v to the doorbell aperture at the
611  * requested doorbell index (CIK).
612  */
613 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
614 {
615         if (amdgpu_device_skip_hw_access(adev))
616                 return;
617
618         if (index < adev->doorbell.num_doorbells) {
619                 writel(v, adev->doorbell.ptr + index);
620         } else {
621                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
622         }
623 }
624
625 /**
626  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
627  *
628  * @adev: amdgpu_device pointer
629  * @index: doorbell index
630  *
631  * Returns the value in the doorbell aperture at the
632  * requested doorbell index (VEGA10+).
633  */
634 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
635 {
636         if (amdgpu_device_skip_hw_access(adev))
637                 return 0;
638
639         if (index < adev->doorbell.num_doorbells) {
640                 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
641         } else {
642                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
643                 return 0;
644         }
645 }
646
647 /**
648  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
649  *
650  * @adev: amdgpu_device pointer
651  * @index: doorbell index
652  * @v: value to write
653  *
654  * Writes @v to the doorbell aperture at the
655  * requested doorbell index (VEGA10+).
656  */
657 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
658 {
659         if (amdgpu_device_skip_hw_access(adev))
660                 return;
661
662         if (index < adev->doorbell.num_doorbells) {
663                 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
664         } else {
665                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
666         }
667 }
668
669 /**
670  * amdgpu_device_indirect_rreg - read an indirect register
671  *
672  * @adev: amdgpu_device pointer
673  * @pcie_index: mmio register offset
674  * @pcie_data: mmio register offset
675  * @reg_addr: indirect register address to read from
676  *
677  * Returns the value of indirect register @reg_addr
678  */
679 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
680                                 u32 pcie_index, u32 pcie_data,
681                                 u32 reg_addr)
682 {
683         unsigned long flags;
684         u32 r;
685         void __iomem *pcie_index_offset;
686         void __iomem *pcie_data_offset;
687
688         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
689         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
690         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
691
692         writel(reg_addr, pcie_index_offset);
693         readl(pcie_index_offset);
694         r = readl(pcie_data_offset);
695         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
696
697         return r;
698 }
699
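/*
 * Typical caller sketch (illustrative, modelled on the asic specific pcie
 * register wrappers): the index/data pair is provided by the NBIO block and
 * @reg is the indirect offset to read.
 *
 *   static u32 example_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *   {
 *           u32 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 *           u32 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 *
 *           return amdgpu_device_indirect_rreg(adev, pcie_index, pcie_data, reg);
 *   }
 */
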
700 /**
701  * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
702  *
703  * @adev: amdgpu_device pointer
704  * @pcie_index: mmio register offset
705  * @pcie_data: mmio register offset
706  * @reg_addr: indirect register address to read from
707  *
708  * Returns the value of indirect register @reg_addr
709  */
710 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
711                                   u32 pcie_index, u32 pcie_data,
712                                   u32 reg_addr)
713 {
714         unsigned long flags;
715         u64 r;
716         void __iomem *pcie_index_offset;
717         void __iomem *pcie_data_offset;
718
719         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
720         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
721         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
722
723         /* read low 32 bits */
724         writel(reg_addr, pcie_index_offset);
725         readl(pcie_index_offset);
726         r = readl(pcie_data_offset);
727         /* read high 32 bits */
728         writel(reg_addr + 4, pcie_index_offset);
729         readl(pcie_index_offset);
730         r |= ((u64)readl(pcie_data_offset) << 32);
731         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
732
733         return r;
734 }
735
736 /**
737  * amdgpu_device_indirect_wreg - write an indirect register
738  *
739  * @adev: amdgpu_device pointer
740  * @pcie_index: mmio register offset
741  * @pcie_data: mmio register offset
742  * @reg_addr: indirect register offset
743  * @reg_data: indirect register data
744  *
745  */
746 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
747                                  u32 pcie_index, u32 pcie_data,
748                                  u32 reg_addr, u32 reg_data)
749 {
750         unsigned long flags;
751         void __iomem *pcie_index_offset;
752         void __iomem *pcie_data_offset;
753
754         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
755         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
756         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
757
758         writel(reg_addr, pcie_index_offset);
759         readl(pcie_index_offset);
760         writel(reg_data, pcie_data_offset);
761         readl(pcie_data_offset);
762         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
763 }
764
765 /**
766  * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register
767  *
768  * @adev: amdgpu_device pointer
769  * @pcie_index: mmio register offset
770  * @pcie_data: mmio register offset
771  * @reg_addr: indirect register offset
772  * @reg_data: indirect register data
773  *
774  */
775 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
776                                    u32 pcie_index, u32 pcie_data,
777                                    u32 reg_addr, u64 reg_data)
778 {
779         unsigned long flags;
780         void __iomem *pcie_index_offset;
781         void __iomem *pcie_data_offset;
782
783         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
784         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
785         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
786
787         /* write low 32 bits */
788         writel(reg_addr, pcie_index_offset);
789         readl(pcie_index_offset);
790         writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
791         readl(pcie_data_offset);
792         /* write high 32 bits */
793         writel(reg_addr + 4, pcie_index_offset);
794         readl(pcie_index_offset);
795         writel((u32)(reg_data >> 32), pcie_data_offset);
796         readl(pcie_data_offset);
797         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
798 }
799
800 /**
801  * amdgpu_invalid_rreg - dummy reg read function
802  *
803  * @adev: amdgpu_device pointer
804  * @reg: offset of register
805  *
806  * Dummy register read function.  Used for register blocks
807  * that certain asics don't have (all asics).
808  * Returns the value in the register.
809  */
810 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
811 {
812         DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
813         BUG();
814         return 0;
815 }
816
817 /**
818  * amdgpu_invalid_wreg - dummy reg write function
819  *
820  * @adev: amdgpu_device pointer
821  * @reg: offset of register
822  * @v: value to write to the register
823  *
824  * Dummy register write function.  Used for register blocks
825  * that certain asics don't have (all asics).
826  */
827 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
828 {
829         DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
830                   reg, v);
831         BUG();
832 }
833
834 /**
835  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
836  *
837  * @adev: amdgpu_device pointer
838  * @reg: offset of register
839  *
840  * Dummy register read function.  Used for register blocks
841  * that certain asics don't have (all asics).
842  * Returns the value in the register.
843  */
844 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
845 {
846         DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
847         BUG();
848         return 0;
849 }
850
851 /**
852  * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
853  *
854  * @adev: amdgpu_device pointer
855  * @reg: offset of register
856  * @v: value to write to the register
857  *
858  * Dummy register write function.  Used for register blocks
859  * that certain asics don't have (all asics).
860  */
861 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
862 {
863         DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
864                   reg, v);
865         BUG();
866 }
867
868 /**
869  * amdgpu_block_invalid_rreg - dummy reg read function
870  *
871  * @adev: amdgpu_device pointer
872  * @block: offset of instance
873  * @reg: offset of register
874  *
875  * Dummy register read function.  Used for register blocks
876  * that certain asics don't have (all asics).
877  * Returns the value in the register.
878  */
879 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
880                                           uint32_t block, uint32_t reg)
881 {
882         DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
883                   reg, block);
884         BUG();
885         return 0;
886 }
887
888 /**
889  * amdgpu_block_invalid_wreg - dummy reg write function
890  *
891  * @adev: amdgpu_device pointer
892  * @block: offset of instance
893  * @reg: offset of register
894  * @v: value to write to the register
895  *
896  * Dummy register write function.  Used for register blocks
897  * that certain asics don't have (all asics).
898  */
899 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
900                                       uint32_t block,
901                                       uint32_t reg, uint32_t v)
902 {
903         DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
904                   reg, block, v);
905         BUG();
906 }
907
908 /**
909  * amdgpu_device_asic_init - Wrapper for atom asic_init
910  *
911  * @adev: amdgpu_device pointer
912  *
913  * Does any asic specific work and then calls atom asic init.
914  */
915 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
916 {
917         amdgpu_asic_pre_asic_init(adev);
918
919         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
920                 return amdgpu_atomfirmware_asic_init(adev, true);
921         else
922                 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
923 }
924
925 /**
926  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
927  *
928  * @adev: amdgpu_device pointer
929  *
930  * Allocates a scratch page of VRAM for use by various things in the
931  * driver.
932  */
933 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
934 {
935         return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
936                                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
937                                        &adev->vram_scratch.robj,
938                                        &adev->vram_scratch.gpu_addr,
939                                        (void **)&adev->vram_scratch.ptr);
940 }
941
942 /**
943  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
944  *
945  * @adev: amdgpu_device pointer
946  *
947  * Frees the VRAM scratch page.
948  */
949 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
950 {
951         amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
952 }
953
954 /**
955  * amdgpu_device_program_register_sequence - program an array of registers.
956  *
957  * @adev: amdgpu_device pointer
958  * @registers: pointer to the register array
959  * @array_size: size of the register array
960  *
961  * Programs an array of registers with AND and OR masks.
962  * This is a helper for setting golden registers.
963  */
964 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
965                                              const u32 *registers,
966                                              const u32 array_size)
967 {
968         u32 tmp, reg, and_mask, or_mask;
969         int i;
970
971         if (array_size % 3)
972                 return;
973
974         for (i = 0; i < array_size; i += 3) {
975                 reg = registers[i + 0];
976                 and_mask = registers[i + 1];
977                 or_mask = registers[i + 2];
978
979                 if (and_mask == 0xffffffff) {
980                         tmp = or_mask;
981                 } else {
982                         tmp = RREG32(reg);
983                         tmp &= ~and_mask;
984                         if (adev->family >= AMDGPU_FAMILY_AI)
985                                 tmp |= (or_mask & and_mask);
986                         else
987                                 tmp |= or_mask;
988                 }
989                 WREG32(reg, tmp);
990         }
991 }
992
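/*
 * Illustrative only: the register list is consumed as {offset, and_mask,
 * or_mask} triplets.  The offsets and values below are placeholders, not
 * real golden register settings.
 *
 *   static const u32 example_golden_settings[] = {
 *           0x1234, 0xffffffff, 0x00000001,   // and_mask all 1s: full overwrite
 *           0x5678, 0x0000000f, 0x00000004,   // clear low nibble, OR in 0x4
 *   };
 *
 *   amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *                                           ARRAY_SIZE(example_golden_settings));
 */
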
993 /**
994  * amdgpu_device_pci_config_reset - reset the GPU
995  *
996  * @adev: amdgpu_device pointer
997  *
998  * Resets the GPU using the pci config reset sequence.
999  * Only applicable to asics prior to vega10.
1000  */
1001 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1002 {
1003         pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1004 }
1005
1006 /**
1007  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1008  *
1009  * @adev: amdgpu_device pointer
1010  *
1011  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1012  */
1013 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1014 {
1015         return pci_reset_function(adev->pdev);
1016 }
1017
1018 /*
1019  * GPU doorbell aperture helpers function.
1020  */
1021 /**
1022  * amdgpu_device_doorbell_init - Init doorbell driver information.
1023  *
1024  * @adev: amdgpu_device pointer
1025  *
1026  * Init doorbell driver information (CIK)
1027  * Returns 0 on success, error on failure.
1028  */
1029 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1030 {
1031
1032         /* No doorbell on SI hardware generation */
1033         if (adev->asic_type < CHIP_BONAIRE) {
1034                 adev->doorbell.base = 0;
1035                 adev->doorbell.size = 0;
1036                 adev->doorbell.num_doorbells = 0;
1037                 adev->doorbell.ptr = NULL;
1038                 return 0;
1039         }
1040
1041         if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1042                 return -EINVAL;
1043
1044         amdgpu_asic_init_doorbell_index(adev);
1045
1046         /* doorbell bar mapping */
1047         adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1048         adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1049
1050         if (adev->enable_mes) {
1051                 adev->doorbell.num_doorbells =
1052                         adev->doorbell.size / sizeof(u32);
1053         } else {
1054                 adev->doorbell.num_doorbells =
1055                         min_t(u32, adev->doorbell.size / sizeof(u32),
1056                               adev->doorbell_index.max_assignment+1);
1057                 if (adev->doorbell.num_doorbells == 0)
1058                         return -EINVAL;
1059
1060                 /* For Vega, reserve and map two pages on doorbell BAR since SDMA
1061                  * paging queue doorbells use the second page. The
1062                  * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1063                  * doorbells are in the first page. So with paging queue enabled,
1064                  * the max num_doorbells should be incremented by one page (0x400 dwords).
1065                  */
1066                 if (adev->asic_type >= CHIP_VEGA10)
1067                         adev->doorbell.num_doorbells += 0x400;
1068         }
1069
1070         adev->doorbell.ptr = ioremap(adev->doorbell.base,
1071                                      adev->doorbell.num_doorbells *
1072                                      sizeof(u32));
1073         if (adev->doorbell.ptr == NULL)
1074                 return -ENOMEM;
1075
1076         return 0;
1077 }
1078
1079 /**
1080  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1081  *
1082  * @adev: amdgpu_device pointer
1083  *
1084  * Tear down doorbell driver information (CIK)
1085  */
1086 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1087 {
1088         iounmap(adev->doorbell.ptr);
1089         adev->doorbell.ptr = NULL;
1090 }
1091
1092
1093
1094 /*
1095  * amdgpu_device_wb_*()
1096  * Writeback is the method by which the GPU updates special pages in memory
1097  * with the status of certain GPU events (fences, ring pointers, etc.).
1098  */
1099
1100 /**
1101  * amdgpu_device_wb_fini - Disable Writeback and free memory
1102  *
1103  * @adev: amdgpu_device pointer
1104  *
1105  * Disables Writeback and frees the Writeback memory (all asics).
1106  * Used at driver shutdown.
1107  */
1108 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1109 {
1110         if (adev->wb.wb_obj) {
1111                 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1112                                       &adev->wb.gpu_addr,
1113                                       (void **)&adev->wb.wb);
1114                 adev->wb.wb_obj = NULL;
1115         }
1116 }
1117
1118 /**
1119  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1120  *
1121  * @adev: amdgpu_device pointer
1122  *
1123  * Initializes writeback and allocates writeback memory (all asics).
1124  * Used at driver startup.
1125  * Returns 0 on success or a negative error code on failure.
1126  */
1127 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1128 {
1129         int r;
1130
1131         if (adev->wb.wb_obj == NULL) {
1132                 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1133                 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1134                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1135                                             &adev->wb.wb_obj, &adev->wb.gpu_addr,
1136                                             (void **)&adev->wb.wb);
1137                 if (r) {
1138                         dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1139                         return r;
1140                 }
1141
1142                 adev->wb.num_wb = AMDGPU_MAX_WB;
1143                 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1144
1145                 /* clear wb memory */
1146                 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1147         }
1148
1149         return 0;
1150 }
1151
1152 /**
1153  * amdgpu_device_wb_get - Allocate a wb entry
1154  *
1155  * @adev: amdgpu_device pointer
1156  * @wb: wb index
1157  *
1158  * Allocate a wb slot for use by the driver (all asics).
1159  * Returns 0 on success or -EINVAL on failure.
1160  */
1161 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1162 {
1163         unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1164
1165         if (offset < adev->wb.num_wb) {
1166                 __set_bit(offset, adev->wb.used);
1167                 *wb = offset << 3; /* convert to dw offset */
1168                 return 0;
1169         } else {
1170                 return -EINVAL;
1171         }
1172 }
1173
1174 /**
1175  * amdgpu_device_wb_free - Free a wb entry
1176  *
1177  * @adev: amdgpu_device pointer
1178  * @wb: wb index
1179  *
1180  * Free a wb slot allocated for use by the driver (all asics)
1181  */
1182 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1183 {
1184         wb >>= 3;
1185         if (wb < adev->wb.num_wb)
1186                 __clear_bit(wb, adev->wb.used);
1187 }
1188
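/*
 * Usage sketch (illustrative only): a writeback slot is handed out as a dword
 * offset into the shared WB buffer, and each slot covers 8 dwords (256 bits).
 * Error handling around the caller is omitted.
 *
 *   u32 wb;
 *
 *   if (!amdgpu_device_wb_get(adev, &wb)) {
 *           u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *           volatile u32 *cpu_ptr = &adev->wb.wb[wb];
 *
 *           // hand gpu_addr to the engine, poll status through *cpu_ptr ...
 *           amdgpu_device_wb_free(adev, wb);
 *   }
 */
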
1189 /**
1190  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1191  *
1192  * @adev: amdgpu_device pointer
1193  *
1194  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1195  * to fail, but if any of the BARs is not accessible after the resize we abort
1196  * driver loading by returning -ENODEV.
1197  */
1198 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1199 {
1200         int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1201         struct pci_bus *root;
1202         struct resource *res;
1203         unsigned i;
1204         u16 cmd;
1205         int r;
1206
1207         /* Bypass for VF */
1208         if (amdgpu_sriov_vf(adev))
1209                 return 0;
1210
1211         /* skip if the bios has already enabled large BAR */
1212         if (adev->gmc.real_vram_size &&
1213             (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1214                 return 0;
1215
1216         /* Check if the root BUS has 64bit memory resources */
1217         root = adev->pdev->bus;
1218         while (root->parent)
1219                 root = root->parent;
1220
1221         pci_bus_for_each_resource(root, res, i) {
1222                 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1223                     res->start > 0x100000000ull)
1224                         break;
1225         }
1226
1227         /* Trying to resize is pointless without a root hub window above 4GB */
1228         if (!res)
1229                 return 0;
1230
1231         /* Limit the BAR size to what is available */
1232         rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1233                         rbar_size);
1234
1235         /* Disable memory decoding while we change the BAR addresses and size */
1236         pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1237         pci_write_config_word(adev->pdev, PCI_COMMAND,
1238                               cmd & ~PCI_COMMAND_MEMORY);
1239
1240         /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1241         amdgpu_device_doorbell_fini(adev);
1242         if (adev->asic_type >= CHIP_BONAIRE)
1243                 pci_release_resource(adev->pdev, 2);
1244
1245         pci_release_resource(adev->pdev, 0);
1246
1247         r = pci_resize_resource(adev->pdev, 0, rbar_size);
1248         if (r == -ENOSPC)
1249                 DRM_INFO("Not enough PCI address space for a large BAR.");
1250         else if (r && r != -ENOTSUPP)
1251                 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1252
1253         pci_assign_unassigned_bus_resources(adev->pdev->bus);
1254
1255         /* When the doorbell or fb BAR isn't available we have no chance of
1256          * using the device.
1257          */
1258         r = amdgpu_device_doorbell_init(adev);
1259         if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1260                 return -ENODEV;
1261
1262         pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1263
1264         return 0;
1265 }
1266
1267 /*
1268  * GPU helpers function.
1269  */
1270 /**
1271  * amdgpu_device_need_post - check if the hw needs post or not
1272  *
1273  * @adev: amdgpu_device pointer
1274  *
1275  * Check if the asic has been initialized (all asics) at driver startup,
1276  * or if a post is needed because a hw reset was performed.
1277  * Returns true if post is needed, false if not.
1278  */
1279 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1280 {
1281         uint32_t reg;
1282
1283         if (amdgpu_sriov_vf(adev))
1284                 return false;
1285
1286         if (amdgpu_passthrough(adev)) {
1287                 /* for FIJI: In the whole-GPU pass-through virtualization case, after a VM
1288                  * reboot some old SMC firmware still needs the driver to do a vPost,
1289                  * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
1290                  * this flaw, so force a vPost for SMC versions below 22.15
1291                  */
1292                 if (adev->asic_type == CHIP_FIJI) {
1293                         int err;
1294                         uint32_t fw_ver;
1295                         err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1296                         /* force vPost if an error occurred */
1297                         if (err)
1298                                 return true;
1299
1300                         fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1301                         if (fw_ver < 0x00160e00)
1302                                 return true;
1303                 }
1304         }
1305
1306         /* Don't post if we need to reset whole hive on init */
1307         if (adev->gmc.xgmi.pending_reset)
1308                 return false;
1309
1310         if (adev->has_hw_reset) {
1311                 adev->has_hw_reset = false;
1312                 return true;
1313         }
1314
1315         /* bios scratch used on CIK+ */
1316         if (adev->asic_type >= CHIP_BONAIRE)
1317                 return amdgpu_atombios_scratch_need_asic_init(adev);
1318
1319         /* check MEM_SIZE for older asics */
1320         reg = amdgpu_asic_get_config_memsize(adev);
1321
1322         if ((reg != 0) && (reg != 0xffffffff))
1323                 return false;
1324
1325         return true;
1326 }
1327
1328 /**
1329  * amdgpu_device_should_use_aspm - check if the device should program ASPM
1330  *
1331  * @adev: amdgpu_device pointer
1332  *
1333  * Confirm whether the module parameter and pcie bridge agree that ASPM should
1334  * be set for this device.
1335  *
1336  * Returns true if it should be used or false if not.
1337  */
1338 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1339 {
1340         switch (amdgpu_aspm) {
1341         case -1:
1342                 break;
1343         case 0:
1344                 return false;
1345         case 1:
1346                 return true;
1347         default:
1348                 return false;
1349         }
1350         return pcie_aspm_enabled(adev->pdev);
1351 }
1352
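/*
 * The amdgpu_aspm module parameter drives the switch above: -1 leaves the
 * decision to the PCIe bridge (auto), 0 forces ASPM off and 1 forces it on.
 * For example, ASPM can be disabled at boot with a kernel command line entry
 * such as:
 *
 *   amdgpu.aspm=0
 */
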
1353 /* if we get transitioned to only one device, take VGA back */
1354 /**
1355  * amdgpu_device_vga_set_decode - enable/disable vga decode
1356  *
1357  * @pdev: PCI device pointer
1358  * @state: enable/disable vga decode
1359  *
1360  * Enable/disable vga decode (all asics).
1361  * Returns VGA resource flags.
1362  */
1363 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1364                 bool state)
1365 {
1366         struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1367         amdgpu_asic_set_vga_state(adev, state);
1368         if (state)
1369                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1370                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1371         else
1372                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1373 }
1374
1375 /**
1376  * amdgpu_device_check_block_size - validate the vm block size
1377  *
1378  * @adev: amdgpu_device pointer
1379  *
1380  * Validates the vm block size specified via module parameter.
1381  * The vm block size defines the number of bits in the page table versus the page directory,
1382  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1383  * page table and the remaining bits are in the page directory.
1384  */
1385 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1386 {
1387         /* defines number of bits in page table versus page directory,
1388          * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1389          * page table and the remaining bits are in the page directory */
1390         if (amdgpu_vm_block_size == -1)
1391                 return;
1392
1393         if (amdgpu_vm_block_size < 9) {
1394                 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1395                          amdgpu_vm_block_size);
1396                 amdgpu_vm_block_size = -1;
1397         }
1398 }
1399
1400 /**
1401  * amdgpu_device_check_vm_size - validate the vm size
1402  *
1403  * @adev: amdgpu_device pointer
1404  *
1405  * Validates the vm size in GB specified via module parameter.
1406  * The VM size is the size of the GPU virtual memory space in GB.
1407  */
1408 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1409 {
1410         /* no need to check the default value */
1411         if (amdgpu_vm_size == -1)
1412                 return;
1413
1414         if (amdgpu_vm_size < 1) {
1415                 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1416                          amdgpu_vm_size);
1417                 amdgpu_vm_size = -1;
1418         }
1419 }
1420
1421 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1422 {
1423         struct sysinfo si;
1424         bool is_os_64 = (sizeof(void *) == 8);
1425         uint64_t total_memory;
1426         uint64_t dram_size_seven_GB = 0x1B8000000;
1427         uint64_t dram_size_three_GB = 0xB8000000;
1428
1429         if (amdgpu_smu_memory_pool_size == 0)
1430                 return;
1431
1432         if (!is_os_64) {
1433                 DRM_WARN("Not 64-bit OS, feature not supported\n");
1434                 goto def_value;
1435         }
1436         si_meminfo(&si);
1437         total_memory = (uint64_t)si.totalram * si.mem_unit;
1438
1439         if ((amdgpu_smu_memory_pool_size == 1) ||
1440                 (amdgpu_smu_memory_pool_size == 2)) {
1441                 if (total_memory < dram_size_three_GB)
1442                         goto def_value1;
1443         } else if ((amdgpu_smu_memory_pool_size == 4) ||
1444                 (amdgpu_smu_memory_pool_size == 8)) {
1445                 if (total_memory < dram_size_seven_GB)
1446                         goto def_value1;
1447         } else {
1448                 DRM_WARN("Smu memory pool size not supported\n");
1449                 goto def_value;
1450         }
1451         adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1452
1453         return;
1454
1455 def_value1:
1456         DRM_WARN("Not enough system memory\n");
1457 def_value:
1458         adev->pm.smu_prv_buffer_size = 0;
1459 }
1460
1461 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1462 {
1463         if (!(adev->flags & AMD_IS_APU) ||
1464             adev->asic_type < CHIP_RAVEN)
1465                 return 0;
1466
1467         switch (adev->asic_type) {
1468         case CHIP_RAVEN:
1469                 if (adev->pdev->device == 0x15dd)
1470                         adev->apu_flags |= AMD_APU_IS_RAVEN;
1471                 if (adev->pdev->device == 0x15d8)
1472                         adev->apu_flags |= AMD_APU_IS_PICASSO;
1473                 break;
1474         case CHIP_RENOIR:
1475                 if ((adev->pdev->device == 0x1636) ||
1476                     (adev->pdev->device == 0x164c))
1477                         adev->apu_flags |= AMD_APU_IS_RENOIR;
1478                 else
1479                         adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1480                 break;
1481         case CHIP_VANGOGH:
1482                 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1483                 break;
1484         case CHIP_YELLOW_CARP:
1485                 break;
1486         case CHIP_CYAN_SKILLFISH:
1487                 if ((adev->pdev->device == 0x13FE) ||
1488                     (adev->pdev->device == 0x143F))
1489                         adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1490                 break;
1491         default:
1492                 break;
1493         }
1494
1495         return 0;
1496 }
1497
1498 /**
1499  * amdgpu_device_check_arguments - validate module params
1500  *
1501  * @adev: amdgpu_device pointer
1502  *
1503  * Validates certain module parameters and updates
1504  * the associated values used by the driver (all asics).
1505  */
1506 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1507 {
1508         if (amdgpu_sched_jobs < 4) {
1509                 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1510                          amdgpu_sched_jobs);
1511                 amdgpu_sched_jobs = 4;
1512         } else if (!is_power_of_2(amdgpu_sched_jobs)) {
1513                 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1514                          amdgpu_sched_jobs);
1515                 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1516         }
1517
1518         if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1519                 /* gart size must be greater than or equal to 32M */
1520                 dev_warn(adev->dev, "gart size (%d) too small\n",
1521                          amdgpu_gart_size);
1522                 amdgpu_gart_size = -1;
1523         }
1524
1525         if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1526                 /* gtt size must be greater than or equal to 32M */
1527                 dev_warn(adev->dev, "gtt size (%d) too small\n",
1528                                  amdgpu_gtt_size);
1529                 amdgpu_gtt_size = -1;
1530         }
1531
1532         /* valid range is between 4 and 9 inclusive */
1533         if (amdgpu_vm_fragment_size != -1 &&
1534             (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1535                 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1536                 amdgpu_vm_fragment_size = -1;
1537         }
1538
1539         if (amdgpu_sched_hw_submission < 2) {
1540                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1541                          amdgpu_sched_hw_submission);
1542                 amdgpu_sched_hw_submission = 2;
1543         } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1544                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1545                          amdgpu_sched_hw_submission);
1546                 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1547         }
1548
1549         if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1550                 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1551                 amdgpu_reset_method = -1;
1552         }
1553
1554         amdgpu_device_check_smu_prv_buffer_size(adev);
1555
1556         amdgpu_device_check_vm_size(adev);
1557
1558         amdgpu_device_check_block_size(adev);
1559
1560         adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1561
1562         return 0;
1563 }
1564
1565 /**
1566  * amdgpu_switcheroo_set_state - set switcheroo state
1567  *
1568  * @pdev: pci dev pointer
1569  * @state: vga_switcheroo state
1570  *
1571  * Callback for the switcheroo driver.  Suspends or resumes
1572  * the asics before or after it is powered up using ACPI methods.
1573  */
1574 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1575                                         enum vga_switcheroo_state state)
1576 {
1577         struct drm_device *dev = pci_get_drvdata(pdev);
1578         int r;
1579
1580         if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1581                 return;
1582
1583         if (state == VGA_SWITCHEROO_ON) {
1584                 pr_info("switched on\n");
1585                 /* don't suspend or resume card normally */
1586                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1587
1588                 pci_set_power_state(pdev, PCI_D0);
1589                 amdgpu_device_load_pci_state(pdev);
1590                 r = pci_enable_device(pdev);
1591                 if (r)
1592                         DRM_WARN("pci_enable_device failed (%d)\n", r);
1593                 amdgpu_device_resume(dev, true);
1594
1595                 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1596         } else {
1597                 pr_info("switched off\n");
1598                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1599                 amdgpu_device_suspend(dev, true);
1600                 amdgpu_device_cache_pci_state(pdev);
1601                 /* Shut down the device */
1602                 pci_disable_device(pdev);
1603                 pci_set_power_state(pdev, PCI_D3cold);
1604                 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1605         }
1606 }
1607
1608 /**
1609  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1610  *
1611  * @pdev: pci dev pointer
1612  *
1613  * Callback for the switcheroo driver.  Checks whether the switcheroo
1614  * state can be changed.
1615  * Returns true if the state can be changed, false if not.
1616  */
1617 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1618 {
1619         struct drm_device *dev = pci_get_drvdata(pdev);
1620
1621         /*
1622          * FIXME: open_count is protected by drm_global_mutex but that would lead to
1623          * locking inversion with the driver load path. And the access here is
1624          * completely racy anyway. So don't bother with locking for now.
1625          */
1626         return atomic_read(&dev->open_count) == 0;
1627 }
1628
1629 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1630         .set_gpu_state = amdgpu_switcheroo_set_state,
1631         .reprobe = NULL,
1632         .can_switch = amdgpu_switcheroo_can_switch,
1633 };
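/*
 * Editor's note: a hedged sketch (not copied from this file) of how the ops
 * table above is hooked up to the vga_switcheroo framework; the PX check is
 * an assumption standing in for whatever the driver computes at init time.
 *
 *     bool px = amdgpu_device_supports_px(adev_to_drm(adev));
 *
 *     if (px)
 *             vga_switcheroo_register_client(adev->pdev,
 *                                            &amdgpu_switcheroo_ops, px);
 */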
1634
1635 /**
1636  * amdgpu_device_ip_set_clockgating_state - set the CG state
1637  *
1638  * @dev: amdgpu_device pointer
1639  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1640  * @state: clockgating state (gate or ungate)
1641  *
1642  * Sets the requested clockgating state for all instances of
1643  * the hardware IP specified.
1644  * Returns the error code from the last instance.
1645  */
1646 int amdgpu_device_ip_set_clockgating_state(void *dev,
1647                                            enum amd_ip_block_type block_type,
1648                                            enum amd_clockgating_state state)
1649 {
1650         struct amdgpu_device *adev = dev;
1651         int i, r = 0;
1652
1653         for (i = 0; i < adev->num_ip_blocks; i++) {
1654                 if (!adev->ip_blocks[i].status.valid)
1655                         continue;
1656                 if (adev->ip_blocks[i].version->type != block_type)
1657                         continue;
1658                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1659                         continue;
1660                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1661                         (void *)adev, state);
1662                 if (r)
1663                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1664                                   adev->ip_blocks[i].version->funcs->name, r);
1665         }
1666         return r;
1667 }
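/*
 * Editor's note: an illustrative caller (not taken from this file) gating
 * clocks for all GFX IP instances; the error handling shown is a sketch only.
 *
 *     r = amdgpu_device_ip_set_clockgating_state(adev,
 *                                                AMD_IP_BLOCK_TYPE_GFX,
 *                                                AMD_CG_STATE_GATE);
 *     if (r)
 *             dev_warn(adev->dev, "failed to gate GFX clocks (%d)\n", r);
 */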
1668
1669 /**
1670  * amdgpu_device_ip_set_powergating_state - set the PG state
1671  *
1672  * @dev: amdgpu_device pointer
1673  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1674  * @state: powergating state (gate or ungate)
1675  *
1676  * Sets the requested powergating state for all instances of
1677  * the hardware IP specified.
1678  * Returns the error code from the last instance.
1679  */
1680 int amdgpu_device_ip_set_powergating_state(void *dev,
1681                                            enum amd_ip_block_type block_type,
1682                                            enum amd_powergating_state state)
1683 {
1684         struct amdgpu_device *adev = dev;
1685         int i, r = 0;
1686
1687         for (i = 0; i < adev->num_ip_blocks; i++) {
1688                 if (!adev->ip_blocks[i].status.valid)
1689                         continue;
1690                 if (adev->ip_blocks[i].version->type != block_type)
1691                         continue;
1692                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1693                         continue;
1694                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1695                         (void *)adev, state);
1696                 if (r)
1697                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1698                                   adev->ip_blocks[i].version->funcs->name, r);
1699         }
1700         return r;
1701 }
1702
1703 /**
1704  * amdgpu_device_ip_get_clockgating_state - get the CG state
1705  *
1706  * @adev: amdgpu_device pointer
1707  * @flags: clockgating feature flags
1708  *
1709  * Walks the list of IPs on the device and updates the clockgating
1710  * flags for each IP.
1711  * Updates @flags with the feature flags for each hardware IP where
1712  * clockgating is enabled.
1713  */
1714 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1715                                             u64 *flags)
1716 {
1717         int i;
1718
1719         for (i = 0; i < adev->num_ip_blocks; i++) {
1720                 if (!adev->ip_blocks[i].status.valid)
1721                         continue;
1722                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1723                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1724         }
1725 }
1726
1727 /**
1728  * amdgpu_device_ip_wait_for_idle - wait for idle
1729  *
1730  * @adev: amdgpu_device pointer
1731  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1732  *
1733  * Waits for the requested hardware IP to be idle.
1734  * Returns 0 for success or a negative error code on failure.
1735  */
1736 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1737                                    enum amd_ip_block_type block_type)
1738 {
1739         int i, r;
1740
1741         for (i = 0; i < adev->num_ip_blocks; i++) {
1742                 if (!adev->ip_blocks[i].status.valid)
1743                         continue;
1744                 if (adev->ip_blocks[i].version->type == block_type) {
1745                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1746                         if (r)
1747                                 return r;
1748                         break;
1749                 }
1750         }
1751         return 0;
1753 }
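/*
 * Editor's note: a hedged example (hypothetical caller) of waiting for a
 * single IP to go idle before touching its state.
 *
 *     r = amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
 *     if (r)
 *             return r;
 */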
1754
1755 /**
1756  * amdgpu_device_ip_is_idle - is the hardware IP idle
1757  *
1758  * @adev: amdgpu_device pointer
1759  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1760  *
1761  * Check if the hardware IP is idle or not.
1762  * Returns true if the IP is idle, false if not.
1763  */
1764 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1765                               enum amd_ip_block_type block_type)
1766 {
1767         int i;
1768
1769         for (i = 0; i < adev->num_ip_blocks; i++) {
1770                 if (!adev->ip_blocks[i].status.valid)
1771                         continue;
1772                 if (adev->ip_blocks[i].version->type == block_type)
1773                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1774         }
1775         return true;
1777 }
1778
1779 /**
1780  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1781  *
1782  * @adev: amdgpu_device pointer
1783  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1784  *
1785  * Returns a pointer to the hardware IP block structure
1786  * if it exists for the asic, otherwise NULL.
1787  */
1788 struct amdgpu_ip_block *
1789 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1790                               enum amd_ip_block_type type)
1791 {
1792         int i;
1793
1794         for (i = 0; i < adev->num_ip_blocks; i++)
1795                 if (adev->ip_blocks[i].version->type == type)
1796                         return &adev->ip_blocks[i];
1797
1798         return NULL;
1799 }
1800
1801 /**
1802  * amdgpu_device_ip_block_version_cmp
1803  *
1804  * @adev: amdgpu_device pointer
1805  * @type: enum amd_ip_block_type
1806  * @major: major version
1807  * @minor: minor version
1808  *
1809  * Returns 0 if the IP block's version is equal to or greater than the
1810  * requested version, 1 if it is smaller or the ip_block doesn't exist.
1811  */
1812 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1813                                        enum amd_ip_block_type type,
1814                                        u32 major, u32 minor)
1815 {
1816         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1817
1818         if (ip_block && ((ip_block->version->major > major) ||
1819                         ((ip_block->version->major == major) &&
1820                         (ip_block->version->minor >= minor))))
1821                 return 0;
1822
1823         return 1;
1824 }
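/*
 * Editor's note: an illustrative use of the version comparison above with
 * hypothetical version numbers; a return of 0 means "at least v8.1".
 *
 *     if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *                                            8, 1) == 0) {
 *             // GFX IP block is version 8.1 or newer
 *     }
 */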
1825
1826 /**
1827  * amdgpu_device_ip_block_add
1828  *
1829  * @adev: amdgpu_device pointer
1830  * @ip_block_version: pointer to the IP to add
1831  *
1832  * Adds the IP block driver information to the collection of IPs
1833  * on the asic.
1834  */
1835 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1836                                const struct amdgpu_ip_block_version *ip_block_version)
1837 {
1838         if (!ip_block_version)
1839                 return -EINVAL;
1840
1841         switch (ip_block_version->type) {
1842         case AMD_IP_BLOCK_TYPE_VCN:
1843                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1844                         return 0;
1845                 break;
1846         case AMD_IP_BLOCK_TYPE_JPEG:
1847                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1848                         return 0;
1849                 break;
1850         default:
1851                 break;
1852         }
1853
1854         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1855                   ip_block_version->funcs->name);
1856
1857         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1858
1859         return 0;
1860 }
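/*
 * Editor's note: a sketch of how an ASIC setup routine (e.g. a *_set_ip_blocks
 * helper) typically populates the IP list; the specific block version symbol
 * is only an example, not a reference to this file.
 *
 *     r = amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
 *     if (r)
 *             return r;
 */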
1861
1862 /**
1863  * amdgpu_device_enable_virtual_display - enable virtual display feature
1864  *
1865  * @adev: amdgpu_device pointer
1866  *
1867  * Enables the virtual display feature if the user has enabled it via
1868  * the module parameter virtual_display.  This feature provides a virtual
1869  * display hardware on headless boards or in virtualized environments.
1870  * This function parses and validates the configuration string specified by
1871  * the user and configures the virtual display configuration (number of
1872  * virtual connectors, crtcs, etc.) specified.
1873  */
1874 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1875 {
1876         adev->enable_virtual_display = false;
1877
1878         if (amdgpu_virtual_display) {
1879                 const char *pci_address_name = pci_name(adev->pdev);
1880                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1881
1882                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1883                 pciaddstr_tmp = pciaddstr;
1884                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1885                         pciaddname = strsep(&pciaddname_tmp, ",");
1886                         if (!strcmp("all", pciaddname)
1887                             || !strcmp(pci_address_name, pciaddname)) {
1888                                 long num_crtc;
1889                                 int res = -1;
1890
1891                                 adev->enable_virtual_display = true;
1892
1893                                 if (pciaddname_tmp)
1894                                         res = kstrtol(pciaddname_tmp, 10,
1895                                                       &num_crtc);
1896
1897                                 if (!res) {
1898                                         if (num_crtc < 1)
1899                                                 num_crtc = 1;
1900                                         if (num_crtc > 6)
1901                                                 num_crtc = 6;
1902                                         adev->mode_info.num_crtc = num_crtc;
1903                                 } else {
1904                                         adev->mode_info.num_crtc = 1;
1905                                 }
1906                                 break;
1907                         }
1908                 }
1909
1910                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1911                          amdgpu_virtual_display, pci_address_name,
1912                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1913
1914                 kfree(pciaddstr);
1915         }
1916 }
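/*
 * Editor's note: hedged examples of the virtual_display module parameter
 * format parsed above.  Entries are semicolon separated; each entry is a PCI
 * address (or "all"), optionally followed by ",<num_crtc>" (clamped to 1-6).
 * The addresses below are placeholders.
 *
 *     modprobe amdgpu virtual_display=0000:03:00.0,2
 *     modprobe amdgpu virtual_display="0000:03:00.0,1;0000:04:00.0,4"
 *     modprobe amdgpu virtual_display=all,6
 */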
1917
1918 /**
1919  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1920  *
1921  * @adev: amdgpu_device pointer
1922  *
1923  * Parses the asic configuration parameters specified in the gpu info
1924  * firmware and makes them available to the driver for use in configuring
1925  * the asic.
1926  * Returns 0 on success, -EINVAL on failure.
1927  */
1928 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1929 {
1930         const char *chip_name;
1931         char fw_name[40];
1932         int err;
1933         const struct gpu_info_firmware_header_v1_0 *hdr;
1934
1935         adev->firmware.gpu_info_fw = NULL;
1936
1937         if (adev->mman.discovery_bin) {
1938                 /*
1939                  * FIXME: The bounding box is still needed by Navi12, so
1940                  * temporarily read it from gpu_info firmware. Should be dropped
1941                  * when DAL no longer needs it.
1942                  */
1943                 if (adev->asic_type != CHIP_NAVI12)
1944                         return 0;
1945         }
1946
1947         switch (adev->asic_type) {
1948         default:
1949                 return 0;
1950         case CHIP_VEGA10:
1951                 chip_name = "vega10";
1952                 break;
1953         case CHIP_VEGA12:
1954                 chip_name = "vega12";
1955                 break;
1956         case CHIP_RAVEN:
1957                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1958                         chip_name = "raven2";
1959                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1960                         chip_name = "picasso";
1961                 else
1962                         chip_name = "raven";
1963                 break;
1964         case CHIP_ARCTURUS:
1965                 chip_name = "arcturus";
1966                 break;
1967         case CHIP_NAVI12:
1968                 chip_name = "navi12";
1969                 break;
1970         }
1971
1972         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1973         err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1974         if (err) {
1975                 dev_err(adev->dev,
1976                         "Failed to load gpu_info firmware \"%s\"\n",
1977                         fw_name);
1978                 goto out;
1979         }
1980         err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1981         if (err) {
1982                 dev_err(adev->dev,
1983                         "Failed to validate gpu_info firmware \"%s\"\n",
1984                         fw_name);
1985                 goto out;
1986         }
1987
1988         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1989         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1990
1991         switch (hdr->version_major) {
1992         case 1:
1993         {
1994                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1995                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1996                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1997
1998                 /*
1999                  * Should be dropped when DAL no longer needs it.
2000                  */
2001                 if (adev->asic_type == CHIP_NAVI12)
2002                         goto parse_soc_bounding_box;
2003
2004                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2005                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2006                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2007                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2008                 adev->gfx.config.max_texture_channel_caches =
2009                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
2010                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2011                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2012                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2013                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2014                 adev->gfx.config.double_offchip_lds_buf =
2015                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2016                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2017                 adev->gfx.cu_info.max_waves_per_simd =
2018                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2019                 adev->gfx.cu_info.max_scratch_slots_per_cu =
2020                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2021                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2022                 if (hdr->version_minor >= 1) {
2023                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2024                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2025                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2026                         adev->gfx.config.num_sc_per_sh =
2027                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2028                         adev->gfx.config.num_packer_per_sc =
2029                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2030                 }
2031
2032 parse_soc_bounding_box:
2033                 /*
2034                  * soc bounding box info is not integrated in discovery table,
2035                  * we always need to parse it from gpu info firmware if needed.
2036                  */
2037                 if (hdr->version_minor == 2) {
2038                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2039                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2040                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2041                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2042                 }
2043                 break;
2044         }
2045         default:
2046                 dev_err(adev->dev,
2047                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2048                 err = -EINVAL;
2049                 goto out;
2050         }
2051 out:
2052         return err;
2053 }
2054
2055 /**
2056  * amdgpu_device_ip_early_init - run early init for hardware IPs
2057  *
2058  * @adev: amdgpu_device pointer
2059  *
2060  * Early initialization pass for hardware IPs.  The hardware IPs that make
2061  * up each asic are discovered and each IP's early_init callback is run.  This
2062  * is the first stage in initializing the asic.
2063  * Returns 0 on success, negative error code on failure.
2064  */
2065 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2066 {
2067         struct drm_device *dev = adev_to_drm(adev);
2068         struct pci_dev *parent;
2069         int i, r;
2070
2071         amdgpu_device_enable_virtual_display(adev);
2072
2073         if (amdgpu_sriov_vf(adev)) {
2074                 r = amdgpu_virt_request_full_gpu(adev, true);
2075                 if (r)
2076                         return r;
2077         }
2078
2079         switch (adev->asic_type) {
2080 #ifdef CONFIG_DRM_AMDGPU_SI
2081         case CHIP_VERDE:
2082         case CHIP_TAHITI:
2083         case CHIP_PITCAIRN:
2084         case CHIP_OLAND:
2085         case CHIP_HAINAN:
2086                 adev->family = AMDGPU_FAMILY_SI;
2087                 r = si_set_ip_blocks(adev);
2088                 if (r)
2089                         return r;
2090                 break;
2091 #endif
2092 #ifdef CONFIG_DRM_AMDGPU_CIK
2093         case CHIP_BONAIRE:
2094         case CHIP_HAWAII:
2095         case CHIP_KAVERI:
2096         case CHIP_KABINI:
2097         case CHIP_MULLINS:
2098                 if (adev->flags & AMD_IS_APU)
2099                         adev->family = AMDGPU_FAMILY_KV;
2100                 else
2101                         adev->family = AMDGPU_FAMILY_CI;
2102
2103                 r = cik_set_ip_blocks(adev);
2104                 if (r)
2105                         return r;
2106                 break;
2107 #endif
2108         case CHIP_TOPAZ:
2109         case CHIP_TONGA:
2110         case CHIP_FIJI:
2111         case CHIP_POLARIS10:
2112         case CHIP_POLARIS11:
2113         case CHIP_POLARIS12:
2114         case CHIP_VEGAM:
2115         case CHIP_CARRIZO:
2116         case CHIP_STONEY:
2117                 if (adev->flags & AMD_IS_APU)
2118                         adev->family = AMDGPU_FAMILY_CZ;
2119                 else
2120                         adev->family = AMDGPU_FAMILY_VI;
2121
2122                 r = vi_set_ip_blocks(adev);
2123                 if (r)
2124                         return r;
2125                 break;
2126         default:
2127                 r = amdgpu_discovery_set_ip_blocks(adev);
2128                 if (r)
2129                         return r;
2130                 break;
2131         }
2132
2133         if (amdgpu_has_atpx() &&
2134             (amdgpu_is_atpx_hybrid() ||
2135              amdgpu_has_atpx_dgpu_power_cntl()) &&
2136             ((adev->flags & AMD_IS_APU) == 0) &&
2137             !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2138                 adev->flags |= AMD_IS_PX;
2139
2140         if (!(adev->flags & AMD_IS_APU)) {
2141                 parent = pci_upstream_bridge(adev->pdev);
2142                 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2143         }
2144
2145         amdgpu_amdkfd_device_probe(adev);
2146
2147         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2148         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2149                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2150         if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2151                 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2152
2153         for (i = 0; i < adev->num_ip_blocks; i++) {
2154                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2155                         DRM_ERROR("disabled ip block: %d <%s>\n",
2156                                   i, adev->ip_blocks[i].version->funcs->name);
2157                         adev->ip_blocks[i].status.valid = false;
2158                 } else {
2159                         if (adev->ip_blocks[i].version->funcs->early_init) {
2160                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2161                                 if (r == -ENOENT) {
2162                                         adev->ip_blocks[i].status.valid = false;
2163                                 } else if (r) {
2164                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2165                                                   adev->ip_blocks[i].version->funcs->name, r);
2166                                         return r;
2167                                 } else {
2168                                         adev->ip_blocks[i].status.valid = true;
2169                                 }
2170                         } else {
2171                                 adev->ip_blocks[i].status.valid = true;
2172                         }
2173                 }
2174                 /* get the vbios after the asic_funcs are set up */
2175                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2176                         r = amdgpu_device_parse_gpu_info_fw(adev);
2177                         if (r)
2178                                 return r;
2179
2180                         /* Read BIOS */
2181                         if (!amdgpu_get_bios(adev))
2182                                 return -EINVAL;
2183
2184                         r = amdgpu_atombios_init(adev);
2185                         if (r) {
2186                                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2187                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2188                                 return r;
2189                         }
2190
2191                         /* get pf2vf msg info at its earliest time */
2192                         if (amdgpu_sriov_vf(adev))
2193                                 amdgpu_virt_init_data_exchange(adev);
2194
2195                 }
2196         }
2197
2198         adev->cg_flags &= amdgpu_cg_mask;
2199         adev->pg_flags &= amdgpu_pg_mask;
2200
2201         return 0;
2202 }
2203
2204 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2205 {
2206         int i, r;
2207
2208         for (i = 0; i < adev->num_ip_blocks; i++) {
2209                 if (!adev->ip_blocks[i].status.sw)
2210                         continue;
2211                 if (adev->ip_blocks[i].status.hw)
2212                         continue;
2213                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2214                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2215                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2216                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2217                         if (r) {
2218                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2219                                           adev->ip_blocks[i].version->funcs->name, r);
2220                                 return r;
2221                         }
2222                         adev->ip_blocks[i].status.hw = true;
2223                 }
2224         }
2225
2226         return 0;
2227 }
2228
2229 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2230 {
2231         int i, r;
2232
2233         for (i = 0; i < adev->num_ip_blocks; i++) {
2234                 if (!adev->ip_blocks[i].status.sw)
2235                         continue;
2236                 if (adev->ip_blocks[i].status.hw)
2237                         continue;
2238                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2239                 if (r) {
2240                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2241                                   adev->ip_blocks[i].version->funcs->name, r);
2242                         return r;
2243                 }
2244                 adev->ip_blocks[i].status.hw = true;
2245         }
2246
2247         return 0;
2248 }
2249
2250 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2251 {
2252         int r = 0;
2253         int i;
2254         uint32_t smu_version;
2255
2256         if (adev->asic_type >= CHIP_VEGA10) {
2257                 for (i = 0; i < adev->num_ip_blocks; i++) {
2258                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2259                                 continue;
2260
2261                         if (!adev->ip_blocks[i].status.sw)
2262                                 continue;
2263
2264                         /* no need to do the fw loading again if already done */
2265                         if (adev->ip_blocks[i].status.hw == true)
2266                                 break;
2267
2268                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2269                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2270                                 if (r) {
2271                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2272                                                           adev->ip_blocks[i].version->funcs->name, r);
2273                                         return r;
2274                                 }
2275                         } else {
2276                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2277                                 if (r) {
2278                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2279                                                           adev->ip_blocks[i].version->funcs->name, r);
2280                                         return r;
2281                                 }
2282                         }
2283
2284                         adev->ip_blocks[i].status.hw = true;
2285                         break;
2286                 }
2287         }
2288
2289         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2290                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2291
2292         return r;
2293 }
2294
2295 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2296 {
2297         long timeout;
2298         int r, i;
2299
2300         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2301                 struct amdgpu_ring *ring = adev->rings[i];
2302
2303                 /* No need to setup the GPU scheduler for rings that don't need it */
2304                 if (!ring || ring->no_scheduler)
2305                         continue;
2306
2307                 switch (ring->funcs->type) {
2308                 case AMDGPU_RING_TYPE_GFX:
2309                         timeout = adev->gfx_timeout;
2310                         break;
2311                 case AMDGPU_RING_TYPE_COMPUTE:
2312                         timeout = adev->compute_timeout;
2313                         break;
2314                 case AMDGPU_RING_TYPE_SDMA:
2315                         timeout = adev->sdma_timeout;
2316                         break;
2317                 default:
2318                         timeout = adev->video_timeout;
2319                         break;
2320                 }
2321
2322                 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2323                                    ring->num_hw_submission, amdgpu_job_hang_limit,
2324                                    timeout, adev->reset_domain->wq,
2325                                    ring->sched_score, ring->name,
2326                                    adev->dev);
2327                 if (r) {
2328                         DRM_ERROR("Failed to create scheduler on ring %s.\n",
2329                                   ring->name);
2330                         return r;
2331                 }
2332         }
2333
2334         return 0;
2335 }
2336
2337
2338 /**
2339  * amdgpu_device_ip_init - run init for hardware IPs
2340  *
2341  * @adev: amdgpu_device pointer
2342  *
2343  * Main initialization pass for hardware IPs.  The list of all the hardware
2344  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2345  * are run.  sw_init initializes the software state associated with each IP
2346  * and hw_init initializes the hardware associated with each IP.
2347  * Returns 0 on success, negative error code on failure.
2348  */
2349 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2350 {
2351         int i, r;
2352
2353         r = amdgpu_ras_init(adev);
2354         if (r)
2355                 return r;
2356
2357         for (i = 0; i < adev->num_ip_blocks; i++) {
2358                 if (!adev->ip_blocks[i].status.valid)
2359                         continue;
2360                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2361                 if (r) {
2362                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2363                                   adev->ip_blocks[i].version->funcs->name, r);
2364                         goto init_failed;
2365                 }
2366                 adev->ip_blocks[i].status.sw = true;
2367
2368                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2369                         /* need to do common hw init early so everything is set up for gmc */
2370                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2371                         if (r) {
2372                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2373                                 goto init_failed;
2374                         }
2375                         adev->ip_blocks[i].status.hw = true;
2376                 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2377                         /* need to do gmc hw init early so we can allocate gpu mem */
2378                         /* Try to reserve bad pages early */
2379                         if (amdgpu_sriov_vf(adev))
2380                                 amdgpu_virt_exchange_data(adev);
2381
2382                         r = amdgpu_device_vram_scratch_init(adev);
2383                         if (r) {
2384                                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2385                                 goto init_failed;
2386                         }
2387                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2388                         if (r) {
2389                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2390                                 goto init_failed;
2391                         }
2392                         r = amdgpu_device_wb_init(adev);
2393                         if (r) {
2394                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2395                                 goto init_failed;
2396                         }
2397                         adev->ip_blocks[i].status.hw = true;
2398
2399                         /* right after GMC hw init, we create CSA */
2400                         if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2401                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2402                                                                 AMDGPU_GEM_DOMAIN_VRAM,
2403                                                                 AMDGPU_CSA_SIZE);
2404                                 if (r) {
2405                                         DRM_ERROR("allocate CSA failed %d\n", r);
2406                                         goto init_failed;
2407                                 }
2408                         }
2409                 }
2410         }
2411
2412         if (amdgpu_sriov_vf(adev))
2413                 amdgpu_virt_init_data_exchange(adev);
2414
2415         r = amdgpu_ib_pool_init(adev);
2416         if (r) {
2417                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2418                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2419                 goto init_failed;
2420         }
2421
2422         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
2423         if (r)
2424                 goto init_failed;
2425
2426         r = amdgpu_device_ip_hw_init_phase1(adev);
2427         if (r)
2428                 goto init_failed;
2429
2430         r = amdgpu_device_fw_loading(adev);
2431         if (r)
2432                 goto init_failed;
2433
2434         r = amdgpu_device_ip_hw_init_phase2(adev);
2435         if (r)
2436                 goto init_failed;
2437
2438         /*
2439          * Retired pages will be loaded from eeprom and reserved here.
2440          * This should be called after amdgpu_device_ip_hw_init_phase2, since
2441          * for some ASICs the RAS EEPROM code relies on the SMU being fully
2442          * functional for I2C communication, which is only true at this point.
2443          *
2444          * amdgpu_ras_recovery_init may fail, but the upper level only cares
2445          * about failures caused by a bad GPU situation and stops the amdgpu
2446          * init process accordingly.  For other failure cases it still releases
2447          * all the resources and prints an error message, rather than returning
2448          * a negative value to the upper level.
2449          *
2450          * Note: theoretically, this should be called before all vram allocations
2451          * to protect retired pages from being abused.
2452          */
2453         r = amdgpu_ras_recovery_init(adev);
2454         if (r)
2455                 goto init_failed;
2456
2457         /*
2458          * In case of XGMI, grab an extra reference to the reset domain for this device
2459          */
2460         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2461                 if (amdgpu_xgmi_add_device(adev) == 0) {
2462                         if (!amdgpu_sriov_vf(adev)) {
2463                                 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2464
2465                                 if (!hive->reset_domain ||
2466                                     !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2467                                         r = -ENOENT;
2468                                         amdgpu_put_xgmi_hive(hive);
2469                                         goto init_failed;
2470                                 }
2471
2472                                 /* Drop the early temporary reset domain we created for device */
2473                                 amdgpu_reset_put_reset_domain(adev->reset_domain);
2474                                 adev->reset_domain = hive->reset_domain;
2475                                 amdgpu_put_xgmi_hive(hive);
2476                         }
2477                 }
2478         }
2479
2480         r = amdgpu_device_init_schedulers(adev);
2481         if (r)
2482                 goto init_failed;
2483
2484         /* Don't init kfd if the whole hive needs to be reset during init */
2485         if (!adev->gmc.xgmi.pending_reset)
2486                 amdgpu_amdkfd_device_init(adev);
2487
2488         amdgpu_fru_get_product_info(adev);
2489
2490 init_failed:
2491         if (amdgpu_sriov_vf(adev))
2492                 amdgpu_virt_release_full_gpu(adev, true);
2493
2494         return r;
2495 }
2496
2497 /**
2498  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2499  *
2500  * @adev: amdgpu_device pointer
2501  *
2502  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2503  * this function before a GPU reset.  If the value is retained after a
2504  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2505  */
2506 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2507 {
2508         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2509 }
2510
2511 /**
2512  * amdgpu_device_check_vram_lost - check if vram is valid
2513  *
2514  * @adev: amdgpu_device pointer
2515  *
2516  * Checks the reset magic value written to the gart pointer in VRAM.
2517  * The driver calls this after a GPU reset to see if the contents of
2518  * VRAM have been lost or not.
2519  * returns true if vram is lost, false if not.
2520  */
2521 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2522 {
2523         if (memcmp(adev->gart.ptr, adev->reset_magic,
2524                         AMDGPU_RESET_MAGIC_NUM))
2525                 return true;
2526
2527         if (!amdgpu_in_reset(adev))
2528                 return false;
2529
2530         /*
2531          * For all ASICs with baco/mode1 reset, the VRAM is
2532          * always assumed to be lost.
2533          */
2534         switch (amdgpu_asic_reset_method(adev)) {
2535         case AMD_RESET_METHOD_BACO:
2536         case AMD_RESET_METHOD_MODE1:
2537                 return true;
2538         default:
2539                 return false;
2540         }
2541 }
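/*
 * Editor's note: an illustrative sketch (not the driver's actual reset path)
 * of how the reset-magic pair above is intended to be used around a reset.
 *
 *     amdgpu_device_fill_reset_magic(adev);        // before the reset
 *     ...                                          // perform the ASIC reset
 *     if (amdgpu_device_check_vram_lost(adev))
 *             ...                                  // treat VRAM contents as lost
 */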
2542
2543 /**
2544  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2545  *
2546  * @adev: amdgpu_device pointer
2547  * @state: clockgating state (gate or ungate)
2548  *
2549  * The list of all the hardware IPs that make up the asic is walked and the
2550  * set_clockgating_state callbacks are run.
2551  * During the late init pass this is used to enable clockgating for the
2552  * hardware IPs; during fini or suspend it is used to disable it.
2553  * Returns 0 on success, negative error code on failure.
2554  */
2556 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2557                                enum amd_clockgating_state state)
2558 {
2559         int i, j, r;
2560
2561         if (amdgpu_emu_mode == 1)
2562                 return 0;
2563
2564         for (j = 0; j < adev->num_ip_blocks; j++) {
2565                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2566                 if (!adev->ip_blocks[i].status.late_initialized)
2567                         continue;
2568                 /* skip CG for GFX on S0ix */
2569                 if (adev->in_s0ix &&
2570                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2571                         continue;
2572                 /* skip CG for VCE/UVD, it's handled specially */
2573                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2574                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2575                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2576                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2577                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2578                         /* enable clockgating to save power */
2579                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2580                                                                                      state);
2581                         if (r) {
2582                                 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2583                                           adev->ip_blocks[i].version->funcs->name, r);
2584                                 return r;
2585                         }
2586                 }
2587         }
2588
2589         return 0;
2590 }
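/*
 * Editor's note: the index math above walks the IP list front-to-back when
 * gating and back-to-front when ungating; e.g. with three blocks and an
 * ungate request, j = 0,1,2 maps to i = 2,1,0.
 */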
2591
2592 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2593                                enum amd_powergating_state state)
2594 {
2595         int i, j, r;
2596
2597         if (amdgpu_emu_mode == 1)
2598                 return 0;
2599
2600         for (j = 0; j < adev->num_ip_blocks; j++) {
2601                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2602                 if (!adev->ip_blocks[i].status.late_initialized)
2603                         continue;
2604                 /* skip PG for GFX on S0ix */
2605                 if (adev->in_s0ix &&
2606                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2607                         continue;
2608                 /* skip PG for VCE/UVD, it's handled specially */
2609                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2610                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2611                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2612                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2613                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2614                         /* enable powergating to save power */
2615                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2616                                                                                         state);
2617                         if (r) {
2618                                 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2619                                           adev->ip_blocks[i].version->funcs->name, r);
2620                                 return r;
2621                         }
2622                 }
2623         }
2624         return 0;
2625 }
2626
2627 static int amdgpu_device_enable_mgpu_fan_boost(void)
2628 {
2629         struct amdgpu_gpu_instance *gpu_ins;
2630         struct amdgpu_device *adev;
2631         int i, ret = 0;
2632
2633         mutex_lock(&mgpu_info.mutex);
2634
2635         /*
2636          * MGPU fan boost feature should be enabled
2637          * only when there are two or more dGPUs in
2638          * the system
2639          */
2640         if (mgpu_info.num_dgpu < 2)
2641                 goto out;
2642
2643         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2644                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2645                 adev = gpu_ins->adev;
2646                 if (!(adev->flags & AMD_IS_APU) &&
2647                     !gpu_ins->mgpu_fan_enabled) {
2648                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2649                         if (ret)
2650                                 break;
2651
2652                         gpu_ins->mgpu_fan_enabled = 1;
2653                 }
2654         }
2655
2656 out:
2657         mutex_unlock(&mgpu_info.mutex);
2658
2659         return ret;
2660 }
2661
2662 /**
2663  * amdgpu_device_ip_late_init - run late init for hardware IPs
2664  *
2665  * @adev: amdgpu_device pointer
2666  *
2667  * Late initialization pass for hardware IPs.  The list of all the hardware
2668  * IPs that make up the asic is walked and the late_init callbacks are run.
2669  * late_init covers any special initialization that an IP requires
2670  * after all of the IPs have been initialized or something that needs to happen
2671  * late in the init process.
2672  * Returns 0 on success, negative error code on failure.
2673  */
2674 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2675 {
2676         struct amdgpu_gpu_instance *gpu_instance;
2677         int i = 0, r;
2678
2679         for (i = 0; i < adev->num_ip_blocks; i++) {
2680                 if (!adev->ip_blocks[i].status.hw)
2681                         continue;
2682                 if (adev->ip_blocks[i].version->funcs->late_init) {
2683                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2684                         if (r) {
2685                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2686                                           adev->ip_blocks[i].version->funcs->name, r);
2687                                 return r;
2688                         }
2689                 }
2690                 adev->ip_blocks[i].status.late_initialized = true;
2691         }
2692
2693         r = amdgpu_ras_late_init(adev);
2694         if (r) {
2695                 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2696                 return r;
2697         }
2698
2699         amdgpu_ras_set_error_query_ready(adev, true);
2700
2701         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2702         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2703
2704         amdgpu_device_fill_reset_magic(adev);
2705
2706         r = amdgpu_device_enable_mgpu_fan_boost();
2707         if (r)
2708                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2709
2710         /* For passthrough configuration on arcturus and aldebaran, enable special SBR handling */
2711         if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2712                                adev->asic_type == CHIP_ALDEBARAN))
2713                 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2714
2715         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2716                 mutex_lock(&mgpu_info.mutex);
2717
2718                 /*
2719                  * Reset device p-state to low as this was booted with high.
2720                  *
2721                  * This should be performed only after all devices from the same
2722                  * hive get initialized.
2723                  *
2724                  * However, the number of devices in the hive is not known in
2725                  * advance; it is counted one by one as the devices initialize.
2726                  *
2727                  * So we wait for all XGMI interlinked devices to be initialized.
2728                  * This may bring some delays as those devices may come from
2729                  * different hives. But that should be OK.
2730                  */
2731                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2732                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2733                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2734                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2735                                         continue;
2736
2737                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2738                                                 AMDGPU_XGMI_PSTATE_MIN);
2739                                 if (r) {
2740                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2741                                         break;
2742                                 }
2743                         }
2744                 }
2745
2746                 mutex_unlock(&mgpu_info.mutex);
2747         }
2748
2749         return 0;
2750 }
2751
2752 /**
2753  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2754  *
2755  * @adev: amdgpu_device pointer
2756  *
2757  * For ASICs that need to disable the SMC first
2758  */
2759 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2760 {
2761         int i, r;
2762
2763         if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2764                 return;
2765
2766         for (i = 0; i < adev->num_ip_blocks; i++) {
2767                 if (!adev->ip_blocks[i].status.hw)
2768                         continue;
2769                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2770                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2771                         /* XXX handle errors */
2772                         if (r) {
2773                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2774                                           adev->ip_blocks[i].version->funcs->name, r);
2775                         }
2776                         adev->ip_blocks[i].status.hw = false;
2777                         break;
2778                 }
2779         }
2780 }
2781
2782 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2783 {
2784         int i, r;
2785
2786         for (i = 0; i < adev->num_ip_blocks; i++) {
2787                 if (!adev->ip_blocks[i].version->funcs->early_fini)
2788                         continue;
2789
2790                 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2791                 if (r) {
2792                         DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2793                                   adev->ip_blocks[i].version->funcs->name, r);
2794                 }
2795         }
2796
2797         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2798         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2799
2800         amdgpu_amdkfd_suspend(adev, false);
2801
2802         /* Workaround for ASICs that need to disable the SMC first */
2803         amdgpu_device_smu_fini_early(adev);
2804
2805         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2806                 if (!adev->ip_blocks[i].status.hw)
2807                         continue;
2808
2809                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2810                 /* XXX handle errors */
2811                 if (r) {
2812                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2813                                   adev->ip_blocks[i].version->funcs->name, r);
2814                 }
2815
2816                 adev->ip_blocks[i].status.hw = false;
2817         }
2818
2819         if (amdgpu_sriov_vf(adev)) {
2820                 if (amdgpu_virt_release_full_gpu(adev, false))
2821                         DRM_ERROR("failed to release exclusive mode on fini\n");
2822         }
2823
2824         return 0;
2825 }
2826
2827 /**
2828  * amdgpu_device_ip_fini - run fini for hardware IPs
2829  *
2830  * @adev: amdgpu_device pointer
2831  *
2832  * Main teardown pass for hardware IPs.  The list of all the hardware
2833  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2834  * are run.  hw_fini tears down the hardware associated with each IP
2835  * and sw_fini tears down any software state associated with each IP.
2836  * Returns 0 on success, negative error code on failure.
2837  */
2838 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2839 {
2840         int i, r;
2841
2842         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2843                 amdgpu_virt_release_ras_err_handler_data(adev);
2844
2845         if (adev->gmc.xgmi.num_physical_nodes > 1)
2846                 amdgpu_xgmi_remove_device(adev);
2847
2848         amdgpu_amdkfd_device_fini_sw(adev);
2849
2850         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2851                 if (!adev->ip_blocks[i].status.sw)
2852                         continue;
2853
2854                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2855                         amdgpu_ucode_free_bo(adev);
2856                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2857                         amdgpu_device_wb_fini(adev);
2858                         amdgpu_device_vram_scratch_fini(adev);
2859                         amdgpu_ib_pool_fini(adev);
2860                 }
2861
2862                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2863                 /* XXX handle errors */
2864                 if (r) {
2865                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2866                                   adev->ip_blocks[i].version->funcs->name, r);
2867                 }
2868                 adev->ip_blocks[i].status.sw = false;
2869                 adev->ip_blocks[i].status.valid = false;
2870         }
2871
2872         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2873                 if (!adev->ip_blocks[i].status.late_initialized)
2874                         continue;
2875                 if (adev->ip_blocks[i].version->funcs->late_fini)
2876                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2877                 adev->ip_blocks[i].status.late_initialized = false;
2878         }
2879
2880         amdgpu_ras_fini(adev);
2881
2882         return 0;
2883 }
2884
2885 /**
2886  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2887  *
2888  * @work: work_struct.
2889  */
2890 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2891 {
2892         struct amdgpu_device *adev =
2893                 container_of(work, struct amdgpu_device, delayed_init_work.work);
2894         int r;
2895
2896         r = amdgpu_ib_ring_tests(adev);
2897         if (r)
2898                 DRM_ERROR("ib ring test failed (%d).\n", r);
2899 }
2900
2901 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2902 {
2903         struct amdgpu_device *adev =
2904                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2905
2906         WARN_ON_ONCE(adev->gfx.gfx_off_state);
2907         WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2908
2909         if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2910                 adev->gfx.gfx_off_state = true;
2911 }
2912
2913 /**
2914  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2915  *
2916  * @adev: amdgpu_device pointer
2917  *
2918  * First suspend pass for hardware IPs.  The list of all the hardware
2919  * IPs that make up the asic is walked, clockgating is disabled and the
2920  * suspend callbacks are run for the display (DCE) IPs.  suspend puts the
2921  * hardware and software state in each IP into a state suitable for suspend.
2922  * Returns 0 on success, negative error code on failure.
2923  */
2924 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2925 {
2926         int i, r;
2927
2928         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2929         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2930
2931         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2932                 if (!adev->ip_blocks[i].status.valid)
2933                         continue;
2934
2935                 /* displays are handled separately */
2936                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2937                         continue;
2938
2939                 /* XXX handle errors */
2940                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2941                 /* XXX handle errors */
2942                 if (r) {
2943                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2944                                   adev->ip_blocks[i].version->funcs->name, r);
2945                         return r;
2946                 }
2947
2948                 adev->ip_blocks[i].status.hw = false;
2949         }
2950
2951         return 0;
2952 }
2953
2954 /**
2955  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2956  *
2957  * @adev: amdgpu_device pointer
2958  *
2959  * Second suspend pass for hardware IPs.  The list of all the hardware
2960  * IPs that make up the asic is walked and the suspend callbacks are run for
2961  * all IPs except the display (DCE) IPs handled in phase 1.  suspend puts the
2962  * hardware and software state in each IP into a state suitable for suspend.
2963  * Returns 0 on success, negative error code on failure.
2964  */
2965 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2966 {
2967         int i, r;
2968
2969         if (adev->in_s0ix)
2970                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2971
2972         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2973                 if (!adev->ip_blocks[i].status.valid)
2974                         continue;
2975                 /* displays are handled in phase1 */
2976                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2977                         continue;
2978                 /* PSP lost connection when err_event_athub occurs */
2979                 if (amdgpu_ras_intr_triggered() &&
2980                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2981                         adev->ip_blocks[i].status.hw = false;
2982                         continue;
2983                 }
2984
2985                 /* skip unnecessary suspend if we have not initialized them yet */
2986                 if (adev->gmc.xgmi.pending_reset &&
2987                     !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2988                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2989                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2990                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2991                         adev->ip_blocks[i].status.hw = false;
2992                         continue;
2993                 }
2994
2995                 /* skip suspend of gfx and psp for S0ix
2996                  * gfx is in gfxoff state, so on resume it will exit gfxoff just
2997                  * like at runtime. PSP is also part of the always on hardware
2998                  * so no need to suspend it.
2999                  */
3000                 if (adev->in_s0ix &&
3001                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3002                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
3003                         continue;
3004
3005                 /* XXX handle errors */
3006                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3007                 /* XXX handle errors */
3008                 if (r) {
3009                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
3010                                   adev->ip_blocks[i].version->funcs->name, r);
3011                 }
3012                 adev->ip_blocks[i].status.hw = false;
3013                 /* handle putting the SMC in the appropriate state */
3014                 if (!amdgpu_sriov_vf(adev)) {
3015                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3016                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3017                                 if (r) {
3018                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3019                                                         adev->mp1_state, r);
3020                                         return r;
3021                                 }
3022                         }
3023                 }
3024         }
3025
3026         return 0;
3027 }
3028
3029 /**
3030  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3031  *
3032  * @adev: amdgpu_device pointer
3033  *
3034  * Main suspend function for hardware IPs.  The list of all the hardware
3035  * IPs that make up the asic is walked, clockgating is disabled and the
3036  * suspend callbacks are run.  suspend puts the hardware and software state
3037  * in each IP into a state suitable for suspend.
3038  * Returns 0 on success, negative error code on failure.
3039  */
3040 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3041 {
3042         int r;
3043
3044         if (amdgpu_sriov_vf(adev)) {
3045                 amdgpu_virt_fini_data_exchange(adev);
3046                 amdgpu_virt_request_full_gpu(adev, false);
3047         }
3048
3049         r = amdgpu_device_ip_suspend_phase1(adev);
3050         if (r)
3051                 return r;
3052         r = amdgpu_device_ip_suspend_phase2(adev);
3053
3054         if (amdgpu_sriov_vf(adev))
3055                 amdgpu_virt_release_full_gpu(adev, false);
3056
3057         return r;
3058 }
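/*
 * For reference, a condensed view of how the two suspend phases are ordered
 * by amdgpu_device_suspend() further down in this file (sketch only; see the
 * real function for the complete sequence):
 *
 *     amdgpu_device_ip_suspend_phase1(adev);   - displays first
 *     amdgpu_device_evict_resources(adev);     - move VRAM contents out
 *     amdgpu_fence_driver_hw_fini(adev);
 *     amdgpu_device_ip_suspend_phase2(adev);   - everything else
 */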
3059
3060 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3061 {
3062         int i, r;
3063
3064         static enum amd_ip_block_type ip_order[] = {
3065                 AMD_IP_BLOCK_TYPE_COMMON,
3066                 AMD_IP_BLOCK_TYPE_GMC,
3067                 AMD_IP_BLOCK_TYPE_PSP,
3068                 AMD_IP_BLOCK_TYPE_IH,
3069         };
3070
3071         for (i = 0; i < adev->num_ip_blocks; i++) {
3072                 int j;
3073                 struct amdgpu_ip_block *block;
3074
3075                 block = &adev->ip_blocks[i];
3076                 block->status.hw = false;
3077
3078                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3079
3080                         if (block->version->type != ip_order[j] ||
3081                                 !block->status.valid)
3082                                 continue;
3083
3084                         r = block->version->funcs->hw_init(adev);
3085                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3086                         if (r)
3087                                 return r;
3088                         block->status.hw = true;
3089                 }
3090         }
3091
3092         return 0;
3093 }
3094
3095 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3096 {
3097         int i, r;
3098
3099         static enum amd_ip_block_type ip_order[] = {
3100                 AMD_IP_BLOCK_TYPE_SMC,
3101                 AMD_IP_BLOCK_TYPE_DCE,
3102                 AMD_IP_BLOCK_TYPE_GFX,
3103                 AMD_IP_BLOCK_TYPE_SDMA,
3104                 AMD_IP_BLOCK_TYPE_UVD,
3105                 AMD_IP_BLOCK_TYPE_VCE,
3106                 AMD_IP_BLOCK_TYPE_VCN
3107         };
3108
3109         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3110                 int j;
3111                 struct amdgpu_ip_block *block;
3112
3113                 for (j = 0; j < adev->num_ip_blocks; j++) {
3114                         block = &adev->ip_blocks[j];
3115
3116                         if (block->version->type != ip_order[i] ||
3117                                 !block->status.valid ||
3118                                 block->status.hw)
3119                                 continue;
3120
3121                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3122                                 r = block->version->funcs->resume(adev);
3123                         else
3124                                 r = block->version->funcs->hw_init(adev);
3125
3126                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3127                         if (r)
3128                                 return r;
3129                         block->status.hw = true;
3130                 }
3131         }
3132
3133         return 0;
3134 }
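/*
 * Illustrative sketch (assumed ordering): during an SR-IOV reset the two
 * re-init helpers above are expected to run back to back, with firmware
 * loading in between:
 *
 *     amdgpu_device_ip_reinit_early_sriov(adev);   - COMMON, GMC, PSP, IH
 *     amdgpu_device_fw_loading(adev);              - assumed intermediate step
 *     amdgpu_device_ip_reinit_late_sriov(adev);    - SMC, DCE, GFX, SDMA,
 *                                                    UVD/VCE/VCN
 */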
3135
3136 /**
3137  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3138  *
3139  * @adev: amdgpu_device pointer
3140  *
3141  * First resume function for hardware IPs.  The list of all the hardware
3142  * IPs that make up the asic is walked and the resume callbacks are run for
3143  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3144  * after a suspend and updates the software state as necessary.  This
3145  * function is also used for restoring the GPU after a GPU reset.
3146  * Returns 0 on success, negative error code on failure.
3147  */
3148 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3149 {
3150         int i, r;
3151
3152         for (i = 0; i < adev->num_ip_blocks; i++) {
3153                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3154                         continue;
3155                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3156                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3157                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3158                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3159
3160                         r = adev->ip_blocks[i].version->funcs->resume(adev);
3161                         if (r) {
3162                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
3163                                           adev->ip_blocks[i].version->funcs->name, r);
3164                                 return r;
3165                         }
3166                         adev->ip_blocks[i].status.hw = true;
3167                 }
3168         }
3169
3170         return 0;
3171 }
3172
3173 /**
3174  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3175  *
3176  * @adev: amdgpu_device pointer
3177  *
3178  * Second resume function for hardware IPs.  The list of all the hardware
3179  * IPs that make up the asic is walked and the resume callbacks are run for
3180  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3181  * functional state after a suspend and updates the software state as
3182  * necessary.  This function is also used for restoring the GPU after a GPU
3183  * reset.
3184  * Returns 0 on success, negative error code on failure.
3185  */
3186 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3187 {
3188         int i, r;
3189
3190         for (i = 0; i < adev->num_ip_blocks; i++) {
3191                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3192                         continue;
3193                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3194                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3195                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3196                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3197                         continue;
3198                 r = adev->ip_blocks[i].version->funcs->resume(adev);
3199                 if (r) {
3200                         DRM_ERROR("resume of IP block <%s> failed %d\n",
3201                                   adev->ip_blocks[i].version->funcs->name, r);
3202                         return r;
3203                 }
3204                 adev->ip_blocks[i].status.hw = true;
3205         }
3206
3207         return 0;
3208 }
3209
3210 /**
3211  * amdgpu_device_ip_resume - run resume for hardware IPs
3212  *
3213  * @adev: amdgpu_device pointer
3214  *
3215  * Main resume function for hardware IPs.  The hardware IPs
3216  * are split into two resume functions because they are
3217  * also used in recovering from a GPU reset and some additional
3218  * steps need to be taken between them.  In this case (S3/S4) they are
3219  * run sequentially.
3220  * Returns 0 on success, negative error code on failure.
3221  */
3222 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3223 {
3224         int r;
3225
3226         r = amdgpu_amdkfd_resume_iommu(adev);
3227         if (r)
3228                 return r;
3229
3230         r = amdgpu_device_ip_resume_phase1(adev);
3231         if (r)
3232                 return r;
3233
3234         r = amdgpu_device_fw_loading(adev);
3235         if (r)
3236                 return r;
3237
3238         r = amdgpu_device_ip_resume_phase2(adev);
3239
3240         return r;
3241 }
3242
3243 /**
3244  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3245  *
3246  * @adev: amdgpu_device pointer
3247  *
3248  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3249  */
3250 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3251 {
3252         if (amdgpu_sriov_vf(adev)) {
3253                 if (adev->is_atom_fw) {
3254                         if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3255                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3256                 } else {
3257                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3258                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3259                 }
3260
3261                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3262                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3263         }
3264 }
3265
3266 /**
3267  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3268  *
3269  * @asic_type: AMD asic type
3270  *
3271  * Check if there is DC (new modesetting infrastructure) support for an asic.
3272  * Returns true if DC has support, false if not.
3273  */
3274 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3275 {
3276         switch (asic_type) {
3277 #ifdef CONFIG_DRM_AMDGPU_SI
3278         case CHIP_HAINAN:
3279 #endif
3280         case CHIP_TOPAZ:
3281                 /* chips with no display hardware */
3282                 return false;
3283 #if defined(CONFIG_DRM_AMD_DC)
3284         case CHIP_TAHITI:
3285         case CHIP_PITCAIRN:
3286         case CHIP_VERDE:
3287         case CHIP_OLAND:
3288                 /*
3289                  * We have systems in the wild with these ASICs that require
3290                  * LVDS and VGA support which is not supported with DC.
3291                  *
3292                  * Fallback to the non-DC driver here by default so as not to
3293                  * cause regressions.
3294                  */
3295 #if defined(CONFIG_DRM_AMD_DC_SI)
3296                 return amdgpu_dc > 0;
3297 #else
3298                 return false;
3299 #endif
3300         case CHIP_BONAIRE:
3301         case CHIP_KAVERI:
3302         case CHIP_KABINI:
3303         case CHIP_MULLINS:
3304                 /*
3305                  * We have systems in the wild with these ASICs that require
3306                  * VGA support which is not supported with DC.
3307                  *
3308                  * Fallback to the non-DC driver here by default so as not to
3309                  * cause regressions.
3310                  */
3311                 return amdgpu_dc > 0;
3312         default:
3313                 return amdgpu_dc != 0;
3314 #else
3315         default:
3316                 if (amdgpu_dc > 0)
3317                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3318                                          "but isn't supported by ASIC, ignoring\n");
3319                 return false;
3320 #endif
3321         }
3322 }
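/*
 * Usage note (illustrative, parameter values assumed): the checks above key
 * off the amdgpu_dc module parameter, so DC can be forced on the borderline
 * ASICs from the kernel command line, e.g.:
 *
 *     amdgpu.dc=1    - request DC even where the non-DC driver is the default
 *     amdgpu.dc=0    - force the non-DC driver
 *
 * With the parameter left at its default, the switch above picks the
 * conservative option per ASIC.
 */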
3323
3324 /**
3325  * amdgpu_device_has_dc_support - check if dc is supported
3326  *
3327  * @adev: amdgpu_device pointer
3328  *
3329  * Returns true for supported, false for not supported
3330  */
3331 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3332 {
3333         if (amdgpu_sriov_vf(adev) ||
3334             adev->enable_virtual_display ||
3335             (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3336                 return false;
3337
3338         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3339 }
3340
3341 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3342 {
3343         struct amdgpu_device *adev =
3344                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3345         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3346
3347         /* It's a bug to not have a hive within this function */
3348         if (WARN_ON(!hive))
3349                 return;
3350
3351         /*
3352          * Use task barrier to synchronize all xgmi reset works across the
3353          * hive. task_barrier_enter and task_barrier_exit will block
3354          * until all the threads running the xgmi reset works reach
3355          * those points. task_barrier_full will do both blocks.
3356          */
3357         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3358
3359                 task_barrier_enter(&hive->tb);
3360                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3361
3362                 if (adev->asic_reset_res)
3363                         goto fail;
3364
3365                 task_barrier_exit(&hive->tb);
3366                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3367
3368                 if (adev->asic_reset_res)
3369                         goto fail;
3370
3371                 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3372                     adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3373                         adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3374         } else {
3375
3376                 task_barrier_full(&hive->tb);
3377                 adev->asic_reset_res = amdgpu_asic_reset(adev);
3378         }
3379
3380 fail:
3381         if (adev->asic_reset_res)
3382                 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3383                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3384         amdgpu_put_xgmi_hive(hive);
3385 }
3386
3387 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3388 {
3389         char *input = amdgpu_lockup_timeout;
3390         char *timeout_setting = NULL;
3391         int index = 0;
3392         long timeout;
3393         int ret = 0;
3394
3395         /*
3396          * By default the timeout for non-compute jobs is 10000
3397          * and 60000 for compute jobs.
3398          * In SR-IOV or passthrough mode, the timeout for compute
3399          * jobs is 60000 by default.
3400          */
3401         adev->gfx_timeout = msecs_to_jiffies(10000);
3402         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3403         if (amdgpu_sriov_vf(adev))
3404                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3405                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3406         else
3407                 adev->compute_timeout = msecs_to_jiffies(60000);
3408
3409         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3410                 while ((timeout_setting = strsep(&input, ",")) &&
3411                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3412                         ret = kstrtol(timeout_setting, 0, &timeout);
3413                         if (ret)
3414                                 return ret;
3415
3416                         if (timeout == 0) {
3417                                 index++;
3418                                 continue;
3419                         } else if (timeout < 0) {
3420                                 timeout = MAX_SCHEDULE_TIMEOUT;
3421                                 dev_warn(adev->dev, "lockup timeout disabled");
3422                                 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3423                         } else {
3424                                 timeout = msecs_to_jiffies(timeout);
3425                         }
3426
3427                         switch (index++) {
3428                         case 0:
3429                                 adev->gfx_timeout = timeout;
3430                                 break;
3431                         case 1:
3432                                 adev->compute_timeout = timeout;
3433                                 break;
3434                         case 2:
3435                                 adev->sdma_timeout = timeout;
3436                                 break;
3437                         case 3:
3438                                 adev->video_timeout = timeout;
3439                                 break;
3440                         default:
3441                                 break;
3442                         }
3443                 }
3444                 /*
3445                  * There is only one value specified and
3446                  * it should apply to all non-compute jobs.
3447                  */
3448                 if (index == 1) {
3449                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3450                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3451                                 adev->compute_timeout = adev->gfx_timeout;
3452                 }
3453         }
3454
3455         return ret;
3456 }
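/*
 * Example (illustrative values): lockup_timeout, as parsed above, takes up to
 * four comma separated values in milliseconds in the order gfx, compute,
 * sdma, video; 0 keeps the default and a negative value disables the timeout:
 *
 *     amdgpu.lockup_timeout=10000,60000,30000,30000
 *
 * A single value, e.g. amdgpu.lockup_timeout=10000, applies to all non-compute
 * queues (and to compute as well under SR-IOV or passthrough), as handled at
 * the end of the function.
 */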
3457
3458 /**
3459  * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3460  *
3461  * @adev: amdgpu_device pointer
3462  *
3463  * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3464  */
3465 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3466 {
3467         struct iommu_domain *domain;
3468
3469         domain = iommu_get_domain_for_dev(adev->dev);
3470         if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3471                 adev->ram_is_direct_mapped = true;
3472 }
3473
3474 static const struct attribute *amdgpu_dev_attributes[] = {
3475         &dev_attr_product_name.attr,
3476         &dev_attr_product_number.attr,
3477         &dev_attr_serial_number.attr,
3478         &dev_attr_pcie_replay_count.attr,
3479         NULL
3480 };
3481
3482 /**
3483  * amdgpu_device_init - initialize the driver
3484  *
3485  * @adev: amdgpu_device pointer
3486  * @flags: driver flags
3487  *
3488  * Initializes the driver info and hw (all asics).
3489  * Returns 0 for success or an error on failure.
3490  * Called at driver startup.
3491  */
3492 int amdgpu_device_init(struct amdgpu_device *adev,
3493                        uint32_t flags)
3494 {
3495         struct drm_device *ddev = adev_to_drm(adev);
3496         struct pci_dev *pdev = adev->pdev;
3497         int r, i;
3498         bool px = false;
3499         u32 max_MBps;
3500
3501         adev->shutdown = false;
3502         adev->flags = flags;
3503
3504         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3505                 adev->asic_type = amdgpu_force_asic_type;
3506         else
3507                 adev->asic_type = flags & AMD_ASIC_MASK;
3508
3509         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3510         if (amdgpu_emu_mode == 1)
3511                 adev->usec_timeout *= 10;
3512         adev->gmc.gart_size = 512 * 1024 * 1024;
3513         adev->accel_working = false;
3514         adev->num_rings = 0;
3515         RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3516         adev->mman.buffer_funcs = NULL;
3517         adev->mman.buffer_funcs_ring = NULL;
3518         adev->vm_manager.vm_pte_funcs = NULL;
3519         adev->vm_manager.vm_pte_num_scheds = 0;
3520         adev->gmc.gmc_funcs = NULL;
3521         adev->harvest_ip_mask = 0x0;
3522         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3523         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3524
3525         adev->smc_rreg = &amdgpu_invalid_rreg;
3526         adev->smc_wreg = &amdgpu_invalid_wreg;
3527         adev->pcie_rreg = &amdgpu_invalid_rreg;
3528         adev->pcie_wreg = &amdgpu_invalid_wreg;
3529         adev->pciep_rreg = &amdgpu_invalid_rreg;
3530         adev->pciep_wreg = &amdgpu_invalid_wreg;
3531         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3532         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3533         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3534         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3535         adev->didt_rreg = &amdgpu_invalid_rreg;
3536         adev->didt_wreg = &amdgpu_invalid_wreg;
3537         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3538         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3539         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3540         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3541
3542         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3543                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3544                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3545
3546         /* mutex initializations are all done here so we
3547          * can call these functions again without locking issues */
3548         mutex_init(&adev->firmware.mutex);
3549         mutex_init(&adev->pm.mutex);
3550         mutex_init(&adev->gfx.gpu_clock_mutex);
3551         mutex_init(&adev->srbm_mutex);
3552         mutex_init(&adev->gfx.pipe_reserve_mutex);
3553         mutex_init(&adev->gfx.gfx_off_mutex);
3554         mutex_init(&adev->grbm_idx_mutex);
3555         mutex_init(&adev->mn_lock);
3556         mutex_init(&adev->virt.vf_errors.lock);
3557         hash_init(adev->mn_hash);
3558         mutex_init(&adev->psp.mutex);
3559         mutex_init(&adev->notifier_lock);
3560         mutex_init(&adev->pm.stable_pstate_ctx_lock);
3561         mutex_init(&adev->benchmark_mutex);
3562
3563         amdgpu_device_init_apu_flags(adev);
3564
3565         r = amdgpu_device_check_arguments(adev);
3566         if (r)
3567                 return r;
3568
3569         spin_lock_init(&adev->mmio_idx_lock);
3570         spin_lock_init(&adev->smc_idx_lock);
3571         spin_lock_init(&adev->pcie_idx_lock);
3572         spin_lock_init(&adev->uvd_ctx_idx_lock);
3573         spin_lock_init(&adev->didt_idx_lock);
3574         spin_lock_init(&adev->gc_cac_idx_lock);
3575         spin_lock_init(&adev->se_cac_idx_lock);
3576         spin_lock_init(&adev->audio_endpt_idx_lock);
3577         spin_lock_init(&adev->mm_stats.lock);
3578
3579         INIT_LIST_HEAD(&adev->shadow_list);
3580         mutex_init(&adev->shadow_list_lock);
3581
3582         INIT_LIST_HEAD(&adev->reset_list);
3583
3584         INIT_LIST_HEAD(&adev->ras_list);
3585
3586         INIT_DELAYED_WORK(&adev->delayed_init_work,
3587                           amdgpu_device_delayed_init_work_handler);
3588         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3589                           amdgpu_device_delay_enable_gfx_off);
3590
3591         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3592
3593         adev->gfx.gfx_off_req_count = 1;
3594         adev->gfx.gfx_off_residency = 0;
3595         adev->gfx.gfx_off_entrycount = 0;
3596         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3597
3598         atomic_set(&adev->throttling_logging_enabled, 1);
3599         /*
3600          * If throttling continues, logging will be performed every minute
3601          * to avoid log flooding. "-1" is subtracted since the thermal
3602          * throttling interrupt comes every second. Thus, the total logging
3603          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3604          * for throttling interrupt) = 60 seconds.
3605          */
3606         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3607         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3608
3609         /* Registers mapping */
3610         /* TODO: block userspace mapping of io register */
3611         if (adev->asic_type >= CHIP_BONAIRE) {
3612                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3613                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3614         } else {
3615                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3616                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3617         }
3618
3619         for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3620                 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3621
3622         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3623         if (adev->rmmio == NULL) {
3624                 return -ENOMEM;
3625         }
3626         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3627         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3628
3629         amdgpu_device_get_pcie_info(adev);
3630
3631         if (amdgpu_mcbp)
3632                 DRM_INFO("MCBP is enabled\n");
3633
3634         /*
3635          * The reset domain needs to be present early, before the XGMI hive is
3636          * discovered (if any) and initialized, in order to use the reset sem and
3637          * in_gpu reset flag early on during init and before calling RREG32.
3638          */
3639         adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3640         if (!adev->reset_domain)
3641                 return -ENOMEM;
3642
3643         /* detect hw virtualization here */
3644         amdgpu_detect_virtualization(adev);
3645
3646         r = amdgpu_device_get_job_timeout_settings(adev);
3647         if (r) {
3648                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3649                 return r;
3650         }
3651
3652         /* early init functions */
3653         r = amdgpu_device_ip_early_init(adev);
3654         if (r)
3655                 return r;
3656
3657         /* Enable TMZ based on IP_VERSION */
3658         amdgpu_gmc_tmz_set(adev);
3659
3660         amdgpu_gmc_noretry_set(adev);
3661         /* Need to get xgmi info early to decide the reset behavior */
3662         if (adev->gmc.xgmi.supported) {
3663                 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3664                 if (r)
3665                         return r;
3666         }
3667
3668         /* enable PCIE atomic ops */
3669         if (amdgpu_sriov_vf(adev))
3670                 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3671                         adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3672                         (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3673         else
3674                 adev->have_atomics_support =
3675                         !pci_enable_atomic_ops_to_root(adev->pdev,
3676                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3677                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3678         if (!adev->have_atomics_support)
3679                 dev_info(adev->dev, "PCIE atomic ops are not supported\n");
3680
3681         /* doorbell bar mapping and doorbell index init */
3682         amdgpu_device_doorbell_init(adev);
3683
3684         if (amdgpu_emu_mode == 1) {
3685                 /* post the asic on emulation mode */
3686                 emu_soc_asic_init(adev);
3687                 goto fence_driver_init;
3688         }
3689
3690         amdgpu_reset_init(adev);
3691
3692         /* detect if we are with an SRIOV vbios */
3693         amdgpu_device_detect_sriov_bios(adev);
3694
3695         /* check if we need to reset the asic
3696          *  E.g., driver was not cleanly unloaded previously, etc.
3697          */
3698         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3699                 if (adev->gmc.xgmi.num_physical_nodes) {
3700                         dev_info(adev->dev, "Pending hive reset.\n");
3701                         adev->gmc.xgmi.pending_reset = true;
3702                         /* Only need to init the blocks necessary for SMU to handle the reset */
3703                         for (i = 0; i < adev->num_ip_blocks; i++) {
3704                                 if (!adev->ip_blocks[i].status.valid)
3705                                         continue;
3706                                 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3707                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3708                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3709                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3710                                         DRM_DEBUG("IP %s disabled for hw_init.\n",
3711                                                 adev->ip_blocks[i].version->funcs->name);
3712                                         adev->ip_blocks[i].status.hw = true;
3713                                 }
3714                         }
3715                 } else {
3716                         r = amdgpu_asic_reset(adev);
3717                         if (r) {
3718                                 dev_err(adev->dev, "asic reset on init failed\n");
3719                                 goto failed;
3720                         }
3721                 }
3722         }
3723
3724         pci_enable_pcie_error_reporting(adev->pdev);
3725
3726         /* Post card if necessary */
3727         if (amdgpu_device_need_post(adev)) {
3728                 if (!adev->bios) {
3729                         dev_err(adev->dev, "no vBIOS found\n");
3730                         r = -EINVAL;
3731                         goto failed;
3732                 }
3733                 DRM_INFO("GPU posting now...\n");
3734                 r = amdgpu_device_asic_init(adev);
3735                 if (r) {
3736                         dev_err(adev->dev, "gpu post error!\n");
3737                         goto failed;
3738                 }
3739         }
3740
3741         if (adev->is_atom_fw) {
3742                 /* Initialize clocks */
3743                 r = amdgpu_atomfirmware_get_clock_info(adev);
3744                 if (r) {
3745                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3746                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3747                         goto failed;
3748                 }
3749         } else {
3750                 /* Initialize clocks */
3751                 r = amdgpu_atombios_get_clock_info(adev);
3752                 if (r) {
3753                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3754                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3755                         goto failed;
3756                 }
3757                 /* init i2c buses */
3758                 if (!amdgpu_device_has_dc_support(adev))
3759                         amdgpu_atombios_i2c_init(adev);
3760         }
3761
3762 fence_driver_init:
3763         /* Fence driver */
3764         r = amdgpu_fence_driver_sw_init(adev);
3765         if (r) {
3766                 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3767                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3768                 goto failed;
3769         }
3770
3771         /* init the mode config */
3772         drm_mode_config_init(adev_to_drm(adev));
3773
3774         r = amdgpu_device_ip_init(adev);
3775         if (r) {
3776                 /* failed in exclusive mode due to timeout */
3777                 if (amdgpu_sriov_vf(adev) &&
3778                     !amdgpu_sriov_runtime(adev) &&
3779                     amdgpu_virt_mmio_blocked(adev) &&
3780                     !amdgpu_virt_wait_reset(adev)) {
3781                         dev_err(adev->dev, "VF exclusive mode timeout\n");
3782                         /* Don't send request since VF is inactive. */
3783                         adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3784                         adev->virt.ops = NULL;
3785                         r = -EAGAIN;
3786                         goto release_ras_con;
3787                 }
3788                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3789                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3790                 goto release_ras_con;
3791         }
3792
3793         amdgpu_fence_driver_hw_init(adev);
3794
3795         dev_info(adev->dev,
3796                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3797                         adev->gfx.config.max_shader_engines,
3798                         adev->gfx.config.max_sh_per_se,
3799                         adev->gfx.config.max_cu_per_sh,
3800                         adev->gfx.cu_info.number);
3801
3802         adev->accel_working = true;
3803
3804         amdgpu_vm_check_compute_bug(adev);
3805
3806         /* Initialize the buffer migration limit. */
3807         if (amdgpu_moverate >= 0)
3808                 max_MBps = amdgpu_moverate;
3809         else
3810                 max_MBps = 8; /* Allow 8 MB/s. */
3811         /* Get a log2 for easy divisions. */
3812         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
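        /*
         * Illustrative note: keeping log2(max_MBps) lets later users turn a
         * time budget into a byte budget with a shift instead of a divide,
         * roughly bytes_allowed = time_us << log2_max_MBps, since 1 MB/s is
         * about one byte per microsecond (the exact consumers are assumed to
         * live in the CS throttling code).
         */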
3813
3814         r = amdgpu_pm_sysfs_init(adev);
3815         if (r) {
3816                 adev->pm_sysfs_en = false;
3817                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3818         } else
3819                 adev->pm_sysfs_en = true;
3820
3821         r = amdgpu_ucode_sysfs_init(adev);
3822         if (r) {
3823                 adev->ucode_sysfs_en = false;
3824                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3825         } else
3826                 adev->ucode_sysfs_en = true;
3827
3828         r = amdgpu_psp_sysfs_init(adev);
3829         if (r) {
3830                 adev->psp_sysfs_en = false;
3831                 if (!amdgpu_sriov_vf(adev))
3832                         DRM_ERROR("Creating psp sysfs failed\n");
3833         } else
3834                 adev->psp_sysfs_en = true;
3835
3836         /*
3837          * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3838          * Otherwise the mgpu fan boost feature will be skipped because the
3839          * gpu instance count would be too low.
3840          */
3841         amdgpu_register_gpu_instance(adev);
3842
3843         /* enable clockgating, etc. after ib tests, etc. since some blocks require
3844          * explicit gating rather than handling it automatically.
3845          */
3846         if (!adev->gmc.xgmi.pending_reset) {
3847                 r = amdgpu_device_ip_late_init(adev);
3848                 if (r) {
3849                         dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3850                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3851                         goto release_ras_con;
3852                 }
3853                 /* must succeed. */
3854                 amdgpu_ras_resume(adev);
3855                 queue_delayed_work(system_wq, &adev->delayed_init_work,
3856                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3857         }
3858
3859         if (amdgpu_sriov_vf(adev))
3860                 flush_delayed_work(&adev->delayed_init_work);
3861
3862         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3863         if (r)
3864                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3865
3866         if (IS_ENABLED(CONFIG_PERF_EVENTS))
3867                 r = amdgpu_pmu_init(adev);
3868         if (r)
3869                 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3870
3871         /* Keep the stored PCI config space at hand for restore after a sudden PCI error */
3872         if (amdgpu_device_cache_pci_state(adev->pdev))
3873                 pci_restore_state(pdev);
3874
3875         /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3876         /* this will fail for cards that aren't VGA class devices, just
3877          * ignore it */
3878         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3879                 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3880
3881         if (amdgpu_device_supports_px(ddev)) {
3882                 px = true;
3883                 vga_switcheroo_register_client(adev->pdev,
3884                                                &amdgpu_switcheroo_ops, px);
3885                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3886         }
3887
3888         if (adev->gmc.xgmi.pending_reset)
3889                 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3890                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3891
3892         amdgpu_device_check_iommu_direct_map(adev);
3893
3894         return 0;
3895
3896 release_ras_con:
3897         amdgpu_release_ras_context(adev);
3898
3899 failed:
3900         amdgpu_vf_error_trans_all(adev);
3901
3902         return r;
3903 }
3904
3905 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3906 {
3907
3908         /* Clear all CPU mappings pointing to this device */
3909         unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3910
3911         /* Unmap all mapped bars - Doorbell, registers and VRAM */
3912         amdgpu_device_doorbell_fini(adev);
3913
3914         iounmap(adev->rmmio);
3915         adev->rmmio = NULL;
3916         if (adev->mman.aper_base_kaddr)
3917                 iounmap(adev->mman.aper_base_kaddr);
3918         adev->mman.aper_base_kaddr = NULL;
3919
3920         /* Memory manager related */
3921         if (!adev->gmc.xgmi.connected_to_cpu) {
3922                 arch_phys_wc_del(adev->gmc.vram_mtrr);
3923                 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3924         }
3925 }
3926
3927 /**
3928  * amdgpu_device_fini_hw - tear down the driver
3929  *
3930  * @adev: amdgpu_device pointer
3931  *
3932  * Tear down the driver info (all asics).
3933  * Called at driver shutdown.
3934  */
3935 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3936 {
3937         dev_info(adev->dev, "amdgpu: finishing device.\n");
3938         flush_delayed_work(&adev->delayed_init_work);
3939         adev->shutdown = true;
3940
3941         /* make sure the IB tests have finished before entering exclusive mode
3942          * to avoid preemption on the IB tests
3943          */
3944         if (amdgpu_sriov_vf(adev)) {
3945                 amdgpu_virt_request_full_gpu(adev, false);
3946                 amdgpu_virt_fini_data_exchange(adev);
3947         }
3948
3949         /* disable all interrupts */
3950         amdgpu_irq_disable_all(adev);
3951         if (adev->mode_info.mode_config_initialized) {
3952                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3953                         drm_helper_force_disable_all(adev_to_drm(adev));
3954                 else
3955                         drm_atomic_helper_shutdown(adev_to_drm(adev));
3956         }
3957         amdgpu_fence_driver_hw_fini(adev);
3958
3959         if (adev->mman.initialized) {
3960                 flush_delayed_work(&adev->mman.bdev.wq);
3961                 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3962         }
3963
3964         if (adev->pm_sysfs_en)
3965                 amdgpu_pm_sysfs_fini(adev);
3966         if (adev->ucode_sysfs_en)
3967                 amdgpu_ucode_sysfs_fini(adev);
3968         if (adev->psp_sysfs_en)
3969                 amdgpu_psp_sysfs_fini(adev);
3970         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3971
3972         /* RAS features must be disabled before hw fini */
3973         amdgpu_ras_pre_fini(adev);
3974
3975         amdgpu_device_ip_fini_early(adev);
3976
3977         amdgpu_irq_fini_hw(adev);
3978
3979         if (adev->mman.initialized)
3980                 ttm_device_clear_dma_mappings(&adev->mman.bdev);
3981
3982         amdgpu_gart_dummy_page_fini(adev);
3983
3984         amdgpu_device_unmap_mmio(adev);
3985
3986 }
3987
3988 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3989 {
3990         int idx;
3991
3992         amdgpu_fence_driver_sw_fini(adev);
3993         amdgpu_device_ip_fini(adev);
3994         release_firmware(adev->firmware.gpu_info_fw);
3995         adev->firmware.gpu_info_fw = NULL;
3996         adev->accel_working = false;
3997         dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
3998
3999         amdgpu_reset_fini(adev);
4000
4001         /* free i2c buses */
4002         if (!amdgpu_device_has_dc_support(adev))
4003                 amdgpu_i2c_fini(adev);
4004
4005         if (amdgpu_emu_mode != 1)
4006                 amdgpu_atombios_fini(adev);
4007
4008         kfree(adev->bios);
4009         adev->bios = NULL;
4010         if (amdgpu_device_supports_px(adev_to_drm(adev))) {
4011                 vga_switcheroo_unregister_client(adev->pdev);
4012                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4013         }
4014         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4015                 vga_client_unregister(adev->pdev);
4016
4017         if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4018
4019                 iounmap(adev->rmmio);
4020                 adev->rmmio = NULL;
4021                 amdgpu_device_doorbell_fini(adev);
4022                 drm_dev_exit(idx);
4023         }
4024
4025         if (IS_ENABLED(CONFIG_PERF_EVENTS))
4026                 amdgpu_pmu_fini(adev);
4027         if (adev->mman.discovery_bin)
4028                 amdgpu_discovery_fini(adev);
4029
4030         amdgpu_reset_put_reset_domain(adev->reset_domain);
4031         adev->reset_domain = NULL;
4032
4033         kfree(adev->pci_state);
4034
4035 }
4036
4037 /**
4038  * amdgpu_device_evict_resources - evict device resources
4039  * @adev: amdgpu device object
4040  *
4041  * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4042  * of the vram memory type. Mainly used for evicting device resources
4043  * at suspend time.
4044  *
4045  */
4046 static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
4047 {
4048         /* No need to evict vram on APUs for suspend to ram or s2idle */
4049         if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4050                 return;
4051
4052         if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
4053                 DRM_WARN("evicting device resources failed\n");
4054
4055 }
4056
4057 /*
4058  * Suspend & resume.
4059  */
4060 /**
4061  * amdgpu_device_suspend - initiate device suspend
4062  *
4063  * @dev: drm dev pointer
4064  * @fbcon: notify the fbdev of suspend
4065  *
4066  * Puts the hw in the suspend state (all asics).
4067  * Returns 0 for success or an error on failure.
4068  * Called at driver suspend.
4069  */
4070 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4071 {
4072         struct amdgpu_device *adev = drm_to_adev(dev);
4073         int r = 0;
4074
4075         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4076                 return 0;
4077
4078         adev->in_suspend = true;
4079
4080         if (amdgpu_sriov_vf(adev)) {
4081                 amdgpu_virt_fini_data_exchange(adev);
4082                 r = amdgpu_virt_request_full_gpu(adev, false);
4083                 if (r)
4084                         return r;
4085         }
4086
4087         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4088                 DRM_WARN("smart shift update failed\n");
4089
4090         drm_kms_helper_poll_disable(dev);
4091
4092         if (fbcon)
4093                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4094
4095         cancel_delayed_work_sync(&adev->delayed_init_work);
4096
4097         amdgpu_ras_suspend(adev);
4098
4099         amdgpu_device_ip_suspend_phase1(adev);
4100
4101         if (!adev->in_s0ix)
4102                 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4103
4104         amdgpu_device_evict_resources(adev);
4105
4106         amdgpu_fence_driver_hw_fini(adev);
4107
4108         amdgpu_device_ip_suspend_phase2(adev);
4109
4110         if (amdgpu_sriov_vf(adev))
4111                 amdgpu_virt_release_full_gpu(adev, false);
4112
4113         return 0;
4114 }
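/*
 * Illustrative sketch (hypothetical, simplified): the PM callbacks in
 * amdgpu_drv.c are the usual callers of this suspend/resume pair, roughly:
 *
 *     static int amdgpu_pmops_suspend(struct device *dev)
 *     {
 *             struct drm_device *drm_dev = dev_get_drvdata(dev);
 *
 *             return amdgpu_device_suspend(drm_dev, true);
 *     }
 *
 * The real callback also records whether the target state is S3 or S0ix
 * before calling in; treat the excerpt as a reading aid only.
 */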
4115
4116 /**
4117  * amdgpu_device_resume - initiate device resume
4118  *
4119  * @dev: drm dev pointer
4120  * @fbcon: notify the fbdev of resume
4121  *
4122  * Bring the hw back to operating state (all asics).
4123  * Returns 0 for success or an error on failure.
4124  * Called at driver resume.
4125  */
4126 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4127 {
4128         struct amdgpu_device *adev = drm_to_adev(dev);
4129         int r = 0;
4130
4131         if (amdgpu_sriov_vf(adev)) {
4132                 r = amdgpu_virt_request_full_gpu(adev, true);
4133                 if (r)
4134                         return r;
4135         }
4136
4137         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4138                 return 0;
4139
4140         if (adev->in_s0ix)
4141                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4142
4143         /* post card */
4144         if (amdgpu_device_need_post(adev)) {
4145                 r = amdgpu_device_asic_init(adev);
4146                 if (r)
4147                         dev_err(adev->dev, "amdgpu asic init failed\n");
4148         }
4149
4150         r = amdgpu_device_ip_resume(adev);
4151
4152         /* no matter what r is, always need to properly release full GPU */
4153         if (amdgpu_sriov_vf(adev)) {
4154                 amdgpu_virt_init_data_exchange(adev);
4155                 amdgpu_virt_release_full_gpu(adev, true);
4156         }
4157
4158         if (r) {
4159                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4160                 return r;
4161         }
4162         amdgpu_fence_driver_hw_init(adev);
4163
4164         r = amdgpu_device_ip_late_init(adev);
4165         if (r)
4166                 return r;
4167
4168         queue_delayed_work(system_wq, &adev->delayed_init_work,
4169                            msecs_to_jiffies(AMDGPU_RESUME_MS));
4170
4171         if (!adev->in_s0ix) {
4172                 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4173                 if (r)
4174                         return r;
4175         }
4176
4177         /* Make sure IB tests flushed */
4178         flush_delayed_work(&adev->delayed_init_work);
4179
4180         if (fbcon)
4181                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4182
4183         drm_kms_helper_poll_enable(dev);
4184
4185         amdgpu_ras_resume(adev);
4186
4187         /*
4188          * Most of the connector probing functions try to acquire runtime pm
4189          * refs to ensure that the GPU is powered on when connector polling is
4190          * performed. Since we're calling this from a runtime PM callback,
4191          * trying to acquire rpm refs will cause us to deadlock.
4192          *
4193          * Since we're guaranteed to be holding the rpm lock, it's safe to
4194          * temporarily disable the rpm helpers so this doesn't deadlock us.
4195          */
4196 #ifdef CONFIG_PM
4197         dev->dev->power.disable_depth++;
4198 #endif
4199         if (!amdgpu_device_has_dc_support(adev))
4200                 drm_helper_hpd_irq_event(dev);
4201         else
4202                 drm_kms_helper_hotplug_event(dev);
4203 #ifdef CONFIG_PM
4204         dev->dev->power.disable_depth--;
4205 #endif
4206         adev->in_suspend = false;
4207
4208         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4209                 DRM_WARN("smart shift update failed\n");
4210
4211         return 0;
4212 }
4213
4214 /**
4215  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4216  *
4217  * @adev: amdgpu_device pointer
4218  *
4219  * The list of all the hardware IPs that make up the asic is walked and
4220  * the check_soft_reset callbacks are run.  check_soft_reset determines
4221  * if the asic is still hung or not.
4222  * Returns true if any of the IPs are still in a hung state, false if not.
4223  */
4224 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4225 {
4226         int i;
4227         bool asic_hang = false;
4228
4229         if (amdgpu_sriov_vf(adev))
4230                 return true;
4231
4232         if (amdgpu_asic_need_full_reset(adev))
4233                 return true;
4234
4235         for (i = 0; i < adev->num_ip_blocks; i++) {
4236                 if (!adev->ip_blocks[i].status.valid)
4237                         continue;
4238                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4239                         adev->ip_blocks[i].status.hang =
4240                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4241                 if (adev->ip_blocks[i].status.hang) {
4242                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4243                         asic_hang = true;
4244                 }
4245         }
4246         return asic_hang;
4247 }
4248
4249 /**
4250  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4251  *
4252  * @adev: amdgpu_device pointer
4253  *
4254  * The list of all the hardware IPs that make up the asic is walked and the
4255  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4256  * handles any IP specific hardware or software state changes that are
4257  * necessary for a soft reset to succeed.
4258  * Returns 0 on success, negative error code on failure.
4259  */
4260 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4261 {
4262         int i, r = 0;
4263
4264         for (i = 0; i < adev->num_ip_blocks; i++) {
4265                 if (!adev->ip_blocks[i].status.valid)
4266                         continue;
4267                 if (adev->ip_blocks[i].status.hang &&
4268                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4269                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4270                         if (r)
4271                                 return r;
4272                 }
4273         }
4274
4275         return 0;
4276 }
4277
4278 /**
4279  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4280  *
4281  * @adev: amdgpu_device pointer
4282  *
4283  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4284  * reset is necessary to recover.
4285  * Returns true if a full asic reset is required, false if not.
4286  */
4287 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4288 {
4289         int i;
4290
4291         if (amdgpu_asic_need_full_reset(adev))
4292                 return true;
4293
4294         for (i = 0; i < adev->num_ip_blocks; i++) {
4295                 if (!adev->ip_blocks[i].status.valid)
4296                         continue;
4297                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4298                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4299                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4300                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4301                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4302                         if (adev->ip_blocks[i].status.hang) {
4303                                 dev_info(adev->dev, "Some blocks need a full reset!\n");
4304                                 return true;
4305                         }
4306                 }
4307         }
4308         return false;
4309 }
4310
4311 /**
4312  * amdgpu_device_ip_soft_reset - do a soft reset
4313  *
4314  * @adev: amdgpu_device pointer
4315  *
4316  * The list of all the hardware IPs that make up the asic is walked and the
4317  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4318  * IP specific hardware or software state changes that are necessary to soft
4319  * reset the IP.
4320  * Returns 0 on success, negative error code on failure.
4321  */
4322 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4323 {
4324         int i, r = 0;
4325
4326         for (i = 0; i < adev->num_ip_blocks; i++) {
4327                 if (!adev->ip_blocks[i].status.valid)
4328                         continue;
4329                 if (adev->ip_blocks[i].status.hang &&
4330                     adev->ip_blocks[i].version->funcs->soft_reset) {
4331                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4332                         if (r)
4333                                 return r;
4334                 }
4335         }
4336
4337         return 0;
4338 }
4339
4340 /**
4341  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4342  *
4343  * @adev: amdgpu_device pointer
4344  *
4345  * The list of all the hardware IPs that make up the asic is walked and the
4346  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4347  * handles any IP specific hardware or software state changes that are
4348  * necessary after the IP has been soft reset.
4349  * Returns 0 on success, negative error code on failure.
4350  */
4351 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4352 {
4353         int i, r = 0;
4354
4355         for (i = 0; i < adev->num_ip_blocks; i++) {
4356                 if (!adev->ip_blocks[i].status.valid)
4357                         continue;
4358                 if (adev->ip_blocks[i].status.hang &&
4359                     adev->ip_blocks[i].version->funcs->post_soft_reset)
4360                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4361                 if (r)
4362                         return r;
4363         }
4364
4365         return 0;
4366 }
4367
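/*
 * Editor's note (illustrative, not part of the original file): the four
 * helpers above are used together by the reset path further down in this
 * file: pre_soft_reset -> soft_reset -> post_soft_reset are run on the
 * hung blocks, and check_soft_reset is run again afterwards to confirm
 * the hang has cleared, falling back to a full reset otherwise.
 */
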
4368 /**
4369  * amdgpu_device_recover_vram - Recover some VRAM contents
4370  *
4371  * @adev: amdgpu_device pointer
4372  *
4373  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4374  * restore things like GPUVM page tables after a GPU reset where
4375  * the contents of VRAM might be lost.
4376  *
4377  * Returns:
4378  * 0 on success, negative error code on failure.
4379  */
4380 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4381 {
4382         struct dma_fence *fence = NULL, *next = NULL;
4383         struct amdgpu_bo *shadow;
4384         struct amdgpu_bo_vm *vmbo;
4385         long r = 1, tmo;
4386
4387         if (amdgpu_sriov_runtime(adev))
4388                 tmo = msecs_to_jiffies(8000);
4389         else
4390                 tmo = msecs_to_jiffies(100);
4391
4392         dev_info(adev->dev, "recover vram bo from shadow start\n");
4393         mutex_lock(&adev->shadow_list_lock);
4394         list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4395                 shadow = &vmbo->bo;
4396                 /* No need to recover an evicted BO */
4397                 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4398                     shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4399                     shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4400                         continue;
4401
4402                 r = amdgpu_bo_restore_shadow(shadow, &next);
4403                 if (r)
4404                         break;
4405
4406                 if (fence) {
4407                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4408                         dma_fence_put(fence);
4409                         fence = next;
4410                         if (tmo == 0) {
4411                                 r = -ETIMEDOUT;
4412                                 break;
4413                         } else if (tmo < 0) {
4414                                 r = tmo;
4415                                 break;
4416                         }
4417                 } else {
4418                         fence = next;
4419                 }
4420         }
4421         mutex_unlock(&adev->shadow_list_lock);
4422
4423         if (fence)
4424                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4425         dma_fence_put(fence);
4426
4427         if (r < 0 || tmo <= 0) {
4428                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4429                 return -EIO;
4430         }
4431
4432         dev_info(adev->dev, "recover vram bo from shadow done\n");
4433         return 0;
4434 }
4435
4436
4437 /**
4438  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4439  *
4440  * @adev: amdgpu_device pointer
4441  * @from_hypervisor: request from hypervisor
4442  *
4443  * Do a VF FLR and reinitialize the ASIC.
4444  * Returns 0 on success, negative error code on failure.
4445  */
4446 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4447                                      bool from_hypervisor)
4448 {
4449         int r;
4450         struct amdgpu_hive_info *hive = NULL;
4451         int retry_limit = 0;
4452
4453 retry:
4454         amdgpu_amdkfd_pre_reset(adev);
4455
4456         if (from_hypervisor)
4457                 r = amdgpu_virt_request_full_gpu(adev, true);
4458         else
4459                 r = amdgpu_virt_reset_gpu(adev);
4460         if (r)
4461                 return r;
4462
4463         /* Resume IP prior to SMC */
4464         r = amdgpu_device_ip_reinit_early_sriov(adev);
4465         if (r)
4466                 goto error;
4467
4468         amdgpu_virt_init_data_exchange(adev);
4469
4470         r = amdgpu_device_fw_loading(adev);
4471         if (r)
4472                 return r;
4473
4474         /* now we are okay to resume SMC/CP/SDMA */
4475         r = amdgpu_device_ip_reinit_late_sriov(adev);
4476         if (r)
4477                 goto error;
4478
4479         hive = amdgpu_get_xgmi_hive(adev);
4480         /* Update PSP FW topology after reset */
4481         if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4482                 r = amdgpu_xgmi_update_topology(hive, adev);
4483
4484         if (hive)
4485                 amdgpu_put_xgmi_hive(hive);
4486
4487         if (!r) {
4488                 amdgpu_irq_gpu_reset_resume_helper(adev);
4489                 r = amdgpu_ib_ring_tests(adev);
4490
4491                 amdgpu_amdkfd_post_reset(adev);
4492         }
4493
4494 error:
4495         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4496                 amdgpu_inc_vram_lost(adev);
4497                 r = amdgpu_device_recover_vram(adev);
4498         }
4499         amdgpu_virt_release_full_gpu(adev, true);
4500
4501         if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4502                 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4503                         retry_limit++;
4504                         goto retry;
4505                 } else
4506                         DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4507         }
4508
4509         return r;
4510 }
4511
4512 /**
4513  * amdgpu_device_has_job_running - check if there is any job in the pending list
4514  *
4515  * @adev: amdgpu_device pointer
4516  *
4517  * Check if there is any job in the pending list.
4518  */
4519 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4520 {
4521         int i;
4522         struct drm_sched_job *job;
4523
4524         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4525                 struct amdgpu_ring *ring = adev->rings[i];
4526
4527                 if (!ring || !ring->sched.thread)
4528                         continue;
4529
4530                 spin_lock(&ring->sched.job_list_lock);
4531                 job = list_first_entry_or_null(&ring->sched.pending_list,
4532                                                struct drm_sched_job, list);
4533                 spin_unlock(&ring->sched.job_list_lock);
4534                 if (job)
4535                         return true;
4536         }
4537         return false;
4538 }
4539
4540 /**
4541  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4542  *
4543  * @adev: amdgpu_device pointer
4544  *
4545  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4546  * a hung GPU.
4547  */
4548 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4549 {
4550
4551         if (amdgpu_gpu_recovery == 0)
4552                 goto disabled;
4553
4554         if (!amdgpu_device_ip_check_soft_reset(adev)) {
4555                 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4556                 return false;
4557         }
4558
4559         if (amdgpu_sriov_vf(adev))
4560                 return true;
4561
4562         if (amdgpu_gpu_recovery == -1) {
4563                 switch (adev->asic_type) {
4564 #ifdef CONFIG_DRM_AMDGPU_SI
4565                 case CHIP_VERDE:
4566                 case CHIP_TAHITI:
4567                 case CHIP_PITCAIRN:
4568                 case CHIP_OLAND:
4569                 case CHIP_HAINAN:
4570 #endif
4571 #ifdef CONFIG_DRM_AMDGPU_CIK
4572                 case CHIP_KAVERI:
4573                 case CHIP_KABINI:
4574                 case CHIP_MULLINS:
4575 #endif
4576                 case CHIP_CARRIZO:
4577                 case CHIP_STONEY:
4578                 case CHIP_CYAN_SKILLFISH:
4579                         goto disabled;
4580                 default:
4581                         break;
4582                 }
4583         }
4584
4585         return true;
4586
4587 disabled:
4588         dev_info(adev->dev, "GPU recovery disabled.\n");
4589         return false;
4590 }
4591
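/*
 * Editor's note (illustrative, not part of the original file): the
 * amdgpu_gpu_recovery module parameter consulted above works out to:
 * 0 disables recovery, -1 means "auto" (enabled except on the ASICs
 * listed in the switch above), and any other non-zero value enables it.
 * The value 2 is additionally used later in this file to enable the
 * guilty-job recheck pass after a reset.
 */
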
4592 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4593 {
4594         u32 i;
4595         int ret = 0;
4596
4597         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4598
4599         dev_info(adev->dev, "GPU mode1 reset\n");
4600
4601         /* disable BM */
4602         pci_clear_master(adev->pdev);
4603
4604         amdgpu_device_cache_pci_state(adev->pdev);
4605
4606         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4607                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4608                 ret = amdgpu_dpm_mode1_reset(adev);
4609         } else {
4610                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4611                 ret = psp_gpu_reset(adev);
4612         }
4613
4614         if (ret)
4615                 dev_err(adev->dev, "GPU mode1 reset failed\n");
4616
4617         amdgpu_device_load_pci_state(adev->pdev);
4618
4619         /* wait for asic to come out of reset */
4620         for (i = 0; i < adev->usec_timeout; i++) {
4621                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4622
4623                 if (memsize != 0xffffffff)
4624                         break;
4625                 udelay(1);
4626         }
4627
4628         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4629         return ret;
4630 }
4631
4632 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4633                                  struct amdgpu_reset_context *reset_context)
4634 {
4635         int i, r = 0;
4636         struct amdgpu_job *job = NULL;
4637         bool need_full_reset =
4638                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4639
4640         if (reset_context->reset_req_dev == adev)
4641                 job = reset_context->job;
4642
4643         if (amdgpu_sriov_vf(adev)) {
4644                 /* stop the data exchange thread */
4645                 amdgpu_virt_fini_data_exchange(adev);
4646         }
4647
4648         amdgpu_fence_driver_isr_toggle(adev, true);
4649
4650         /* block all schedulers and reset given job's ring */
4651         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4652                 struct amdgpu_ring *ring = adev->rings[i];
4653
4654                 if (!ring || !ring->sched.thread)
4655                         continue;
4656
4657                 /* Clear the job fences from the fence driver so that force_completion
4658                  * only leaves the NULL and VM flush fences in the fence driver. */
4659                 amdgpu_fence_driver_clear_job_fences(ring);
4660
4661                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4662                 amdgpu_fence_driver_force_completion(ring);
4663         }
4664
4665         amdgpu_fence_driver_isr_toggle(adev, false);
4666
4667         if (job && job->vm)
4668                 drm_sched_increase_karma(&job->base);
4669
4670         r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4671         /* If reset handler not implemented, continue; otherwise return */
4672         if (r == -ENOSYS)
4673                 r = 0;
4674         else
4675                 return r;
4676
4677         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4678         if (!amdgpu_sriov_vf(adev)) {
4679
4680                 if (!need_full_reset)
4681                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4682
4683                 if (!need_full_reset && amdgpu_gpu_recovery) {
4684                         amdgpu_device_ip_pre_soft_reset(adev);
4685                         r = amdgpu_device_ip_soft_reset(adev);
4686                         amdgpu_device_ip_post_soft_reset(adev);
4687                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4688                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4689                                 need_full_reset = true;
4690                         }
4691                 }
4692
4693                 if (need_full_reset)
4694                         r = amdgpu_device_ip_suspend(adev);
4695                 if (need_full_reset)
4696                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4697                 else
4698                         clear_bit(AMDGPU_NEED_FULL_RESET,
4699                                   &reset_context->flags);
4700         }
4701
4702         return r;
4703 }
4704
4705 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4706 {
4707         int i;
4708
4709         lockdep_assert_held(&adev->reset_domain->sem);
4710
4711         for (i = 0; i < adev->num_regs; i++) {
4712                 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4713                 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4714                                              adev->reset_dump_reg_value[i]);
4715         }
4716
4717         return 0;
4718 }
4719
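/*
 * Editor's note (illustrative, not part of the original file): the register
 * list captured above (adev->reset_dump_reg_list / adev->num_regs) is
 * expected to be populated ahead of time from user space, typically via a
 * debugfs interface, so the values read here can be included in the
 * devcoredump emitted below.
 */
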
4720 #ifdef CONFIG_DEV_COREDUMP
4721 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4722                 size_t count, void *data, size_t datalen)
4723 {
4724         struct drm_printer p;
4725         struct amdgpu_device *adev = data;
4726         struct drm_print_iterator iter;
4727         int i;
4728
4729         iter.data = buffer;
4730         iter.offset = 0;
4731         iter.start = offset;
4732         iter.remain = count;
4733
4734         p = drm_coredump_printer(&iter);
4735
4736         drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4737         drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4738         drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4739         drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4740         if (adev->reset_task_info.pid)
4741                 drm_printf(&p, "process_name: %s PID: %d\n",
4742                            adev->reset_task_info.process_name,
4743                            adev->reset_task_info.pid);
4744
4745         if (adev->reset_vram_lost)
4746                 drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4747         if (adev->num_regs) {
4748                 drm_printf(&p, "AMDGPU register dumps:\nOffset:     Value:\n");
4749
4750                 for (i = 0; i < adev->num_regs; i++)
4751                         drm_printf(&p, "0x%08x: 0x%08x\n",
4752                                    adev->reset_dump_reg_list[i],
4753                                    adev->reset_dump_reg_value[i]);
4754         }
4755
4756         return count - iter.remain;
4757 }
4758
4759 static void amdgpu_devcoredump_free(void *data)
4760 {
4761 }
4762
4763 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4764 {
4765         struct drm_device *dev = adev_to_drm(adev);
4766
4767         ktime_get_ts64(&adev->reset_time);
4768         dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
4769                       amdgpu_devcoredump_read, amdgpu_devcoredump_free);
4770 }
4771 #endif
4772
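/*
 * Editor's illustrative sketch (not part of the original file): once
 * dev_coredumpm() above registers the dump, user space can read it back
 * through the standard devcoredump class device.  A minimal user-space
 * sketch, assuming the dump shows up as devcd0:
 */
#if 0	/* user-space example, not kernel code */
#include <stdio.h>

int main(void)
{
        char buf[4096];
        size_t n;
        FILE *f = fopen("/sys/class/devcoredump/devcd0/data", "r");

        if (!f)
                return 1;
        while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
                fwrite(buf, 1, n, stdout);	/* dump the coredump to stdout */
        fclose(f);
        return 0;
}
#endif
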
4773 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4774                          struct amdgpu_reset_context *reset_context)
4775 {
4776         struct amdgpu_device *tmp_adev = NULL;
4777         bool need_full_reset, skip_hw_reset, vram_lost = false;
4778         int r = 0;
4779         bool gpu_reset_for_dev_remove = 0;
4780
4781         /* Try reset handler method first */
4782         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4783                                     reset_list);
4784         amdgpu_reset_reg_dumps(tmp_adev);
4785
4786         reset_context->reset_device_list = device_list_handle;
4787         r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4788         /* If reset handler not implemented, continue; otherwise return */
4789         if (r == -ENOSYS)
4790                 r = 0;
4791         else
4792                 return r;
4793
4794         /* Reset handler not implemented, use the default method */
4795         need_full_reset =
4796                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4797         skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4798
4799         gpu_reset_for_dev_remove =
4800                 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
4801                         test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4802
4803         /*
4804          * ASIC reset has to be done on all XGMI hive nodes ASAP
4805          * to allow proper links negotiation in FW (within 1 sec)
4806          */
4807         if (!skip_hw_reset && need_full_reset) {
4808                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4809                         /* For XGMI run all resets in parallel to speed up the process */
4810                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4811                                 tmp_adev->gmc.xgmi.pending_reset = false;
4812                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4813                                         r = -EALREADY;
4814                         } else
4815                                 r = amdgpu_asic_reset(tmp_adev);
4816
4817                         if (r) {
4818                                 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4819                                          r, adev_to_drm(tmp_adev)->unique);
4820                                 break;
4821                         }
4822                 }
4823
4824                 /* For XGMI wait for all resets to complete before proceed */
4825                 if (!r) {
4826                         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4827                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4828                                         flush_work(&tmp_adev->xgmi_reset_work);
4829                                         r = tmp_adev->asic_reset_res;
4830                                         if (r)
4831                                                 break;
4832                                 }
4833                         }
4834                 }
4835         }
4836
4837         if (!r && amdgpu_ras_intr_triggered()) {
4838                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4839                         if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4840                             tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4841                                 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4842                 }
4843
4844                 amdgpu_ras_intr_cleared();
4845         }
4846
4847         /* Since the mode1 reset affects base ip blocks, the
4848          * phase1 ip blocks need to be resumed. Otherwise there
4849          * will be a BIOS signature error and the psp bootloader
4850          * can't load kdb on the next amdgpu install.
4851          */
4852         if (gpu_reset_for_dev_remove) {
4853                 list_for_each_entry(tmp_adev, device_list_handle, reset_list)
4854                         amdgpu_device_ip_resume_phase1(tmp_adev);
4855
4856                 goto end;
4857         }
4858
4859         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4860                 if (need_full_reset) {
4861                         /* post card */
4862                         r = amdgpu_device_asic_init(tmp_adev);
4863                         if (r) {
4864                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
4865                         } else {
4866                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4867                                 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4868                                 if (r)
4869                                         goto out;
4870
4871                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4872                                 if (r)
4873                                         goto out;
4874
4875                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4876 #ifdef CONFIG_DEV_COREDUMP
4877                                 tmp_adev->reset_vram_lost = vram_lost;
4878                                 memset(&tmp_adev->reset_task_info, 0,
4879                                                 sizeof(tmp_adev->reset_task_info));
4880                                 if (reset_context->job && reset_context->job->vm)
4881                                         tmp_adev->reset_task_info =
4882                                                 reset_context->job->vm->task_info;
4883                                 amdgpu_reset_capture_coredumpm(tmp_adev);
4884 #endif
4885                                 if (vram_lost) {
4886                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
4887                                         amdgpu_inc_vram_lost(tmp_adev);
4888                                 }
4889
4890                                 r = amdgpu_device_fw_loading(tmp_adev);
4891                                 if (r)
4892                                         return r;
4893
4894                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4895                                 if (r)
4896                                         goto out;
4897
4898                                 if (vram_lost)
4899                                         amdgpu_device_fill_reset_magic(tmp_adev);
4900
4901                                 /*
4902                                  * Add this ASIC back as tracked since the reset has
4903                                  * already completed successfully.
4904                                  */
4905                                 amdgpu_register_gpu_instance(tmp_adev);
4906
4907                                 if (!reset_context->hive &&
4908                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4909                                         amdgpu_xgmi_add_device(tmp_adev);
4910
4911                                 r = amdgpu_device_ip_late_init(tmp_adev);
4912                                 if (r)
4913                                         goto out;
4914
4915                                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4916
4917                                 /*
4918                                  * The GPU enters a bad state once the number of
4919                                  * faulty pages detected by ECC reaches the threshold,
4920                                  * and RAS recovery is scheduled next. So add a check
4921                                  * here to break out of recovery if the bad page
4922                                  * threshold has indeed been exceeded, and remind the
4923                                  * user to retire this GPU or set a bigger
4924                                  * bad_page_threshold value to fix this the next time
4925                                  * the driver is probed.
4926                                  */
4927                                 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4928                                         /* must succeed. */
4929                                         amdgpu_ras_resume(tmp_adev);
4930                                 } else {
4931                                         r = -EINVAL;
4932                                         goto out;
4933                                 }
4934
4935                                 /* Update PSP FW topology after reset */
4936                                 if (reset_context->hive &&
4937                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4938                                         r = amdgpu_xgmi_update_topology(
4939                                                 reset_context->hive, tmp_adev);
4940                         }
4941                 }
4942
4943 out:
4944                 if (!r) {
4945                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4946                         r = amdgpu_ib_ring_tests(tmp_adev);
4947                         if (r) {
4948                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4949                                 need_full_reset = true;
4950                                 r = -EAGAIN;
4951                                 goto end;
4952                         }
4953                 }
4954
4955                 if (!r)
4956                         r = amdgpu_device_recover_vram(tmp_adev);
4957                 else
4958                         tmp_adev->asic_reset_res = r;
4959         }
4960
4961 end:
4962         if (need_full_reset)
4963                 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4964         else
4965                 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4966         return r;
4967 }
4968
4969 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
4970 {
4971
4972         switch (amdgpu_asic_reset_method(adev)) {
4973         case AMD_RESET_METHOD_MODE1:
4974                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4975                 break;
4976         case AMD_RESET_METHOD_MODE2:
4977                 adev->mp1_state = PP_MP1_STATE_RESET;
4978                 break;
4979         default:
4980                 adev->mp1_state = PP_MP1_STATE_NONE;
4981                 break;
4982         }
4983 }
4984
4985 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
4986 {
4987         amdgpu_vf_error_trans_all(adev);
4988         adev->mp1_state = PP_MP1_STATE_NONE;
4989 }
4990
4991 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4992 {
4993         struct pci_dev *p = NULL;
4994
4995         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4996                         adev->pdev->bus->number, 1);
4997         if (p) {
4998                 pm_runtime_enable(&(p->dev));
4999                 pm_runtime_resume(&(p->dev));
5000         }
5001 }
5002
5003 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5004 {
5005         enum amd_reset_method reset_method;
5006         struct pci_dev *p = NULL;
5007         u64 expires;
5008
5009         /*
5010          * For now, only BACO and mode1 reset are confirmed to
5011          * suffer from the audio issue if not properly suspended.
5012          */
5013         reset_method = amdgpu_asic_reset_method(adev);
5014         if ((reset_method != AMD_RESET_METHOD_BACO) &&
5015              (reset_method != AMD_RESET_METHOD_MODE1))
5016                 return -EINVAL;
5017
5018         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5019                         adev->pdev->bus->number, 1);
5020         if (!p)
5021                 return -ENODEV;
5022
5023         expires = pm_runtime_autosuspend_expiration(&(p->dev));
5024         if (!expires)
5025                 /*
5026                  * If we cannot get the audio device autosuspend delay,
5027                  * a fixed 4s interval is used. Since 3s is the audio
5028                  * controller's default autosuspend delay setting, the
5029                  * 4s used here is guaranteed to cover it.
5030                  */
5031                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5032
5033         while (!pm_runtime_status_suspended(&(p->dev))) {
5034                 if (!pm_runtime_suspend(&(p->dev)))
5035                         break;
5036
5037                 if (expires < ktime_get_mono_fast_ns()) {
5038                         dev_warn(adev->dev, "failed to suspend display audio\n");
5039                         /* TODO: abort the succeeding gpu reset? */
5040                         return -ETIMEDOUT;
5041                 }
5042         }
5043
5044         pm_runtime_disable(&(p->dev));
5045
5046         return 0;
5047 }
5048
5049 static void amdgpu_device_recheck_guilty_jobs(
5050         struct amdgpu_device *adev, struct list_head *device_list_handle,
5051         struct amdgpu_reset_context *reset_context)
5052 {
5053         int i, r = 0;
5054
5055         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5056                 struct amdgpu_ring *ring = adev->rings[i];
5057                 int ret = 0;
5058                 struct drm_sched_job *s_job;
5059
5060                 if (!ring || !ring->sched.thread)
5061                         continue;
5062
5063                 s_job = list_first_entry_or_null(&ring->sched.pending_list,
5064                                 struct drm_sched_job, list);
5065                 if (s_job == NULL)
5066                         continue;
5067
5068                 /* clear the job's guilty status and rely on the following step to decide the real one */
5069                 drm_sched_reset_karma(s_job);
5070                 drm_sched_resubmit_jobs_ext(&ring->sched, 1);
5071
5072                 if (!s_job->s_fence->parent) {
5073                         DRM_WARN("Failed to get a HW fence for job!");
5074                         continue;
5075                 }
5076
5077                 ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
5078                 if (ret == 0) { /* timeout */
5079                         DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
5080                                                 ring->sched.name, s_job->id);
5081
5082
5083                         amdgpu_fence_driver_isr_toggle(adev, true);
5084
5085                         /* Clear this failed job from fence array */
5086                         amdgpu_fence_driver_clear_job_fences(ring);
5087
5088                         amdgpu_fence_driver_isr_toggle(adev, false);
5089
5090                         /* Since the job won't signal and we go for
5091                          * another resubmit, drop this parent pointer
5092                          */
5093                         dma_fence_put(s_job->s_fence->parent);
5094                         s_job->s_fence->parent = NULL;
5095
5096                         /* set guilty */
5097                         drm_sched_increase_karma(s_job);
5098                         amdgpu_reset_prepare_hwcontext(adev, reset_context);
5099 retry:
5100                         /* do hw reset */
5101                         if (amdgpu_sriov_vf(adev)) {
5102                                 amdgpu_virt_fini_data_exchange(adev);
5103                                 r = amdgpu_device_reset_sriov(adev, false);
5104                                 if (r)
5105                                         adev->asic_reset_res = r;
5106                         } else {
5107                                 clear_bit(AMDGPU_SKIP_HW_RESET,
5108                                           &reset_context->flags);
5109                                 r = amdgpu_do_asic_reset(device_list_handle,
5110                                                          reset_context);
5111                                 if (r && r == -EAGAIN)
5112                                         goto retry;
5113                         }
5114
5115                         /*
5116                          * add reset counter so that the following
5117                          * resubmitted job could flush vmid
5118                          */
5119                         atomic_inc(&adev->gpu_reset_counter);
5120                         continue;
5121                 }
5122
5123                 /* got the hw fence, signal finished fence */
5124                 atomic_dec(ring->sched.score);
5125                 dma_fence_get(&s_job->s_fence->finished);
5126                 dma_fence_signal(&s_job->s_fence->finished);
5127                 dma_fence_put(&s_job->s_fence->finished);
5128
5129                 /* remove node from list and free the job */
5130                 spin_lock(&ring->sched.job_list_lock);
5131                 list_del_init(&s_job->list);
5132                 spin_unlock(&ring->sched.job_list_lock);
5133                 ring->sched.ops->free_job(s_job);
5134         }
5135 }
5136
5137 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5138 {
5139         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5140
5141 #if defined(CONFIG_DEBUG_FS)
5142         if (!amdgpu_sriov_vf(adev))
5143                 cancel_work(&adev->reset_work);
5144 #endif
5145
5146         if (adev->kfd.dev)
5147                 cancel_work(&adev->kfd.reset_work);
5148
5149         if (amdgpu_sriov_vf(adev))
5150                 cancel_work(&adev->virt.flr_work);
5151
5152         if (con && adev->ras_enabled)
5153                 cancel_work(&con->recovery_work);
5154
5155 }
5156
5157
5158 /**
5159  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5160  *
5161  * @adev: amdgpu_device pointer
5162  * @job: the job that triggered the hang
5163  *
5164  * Attempt to reset the GPU if it has hung (all asics).
5165  * Attempt to do a soft reset or full reset and reinitialize the ASIC.
5166  * Returns 0 for success or an error on failure.
5167  */
5168
5169 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5170                               struct amdgpu_job *job,
5171                               struct amdgpu_reset_context *reset_context)
5172 {
5173         struct list_head device_list, *device_list_handle =  NULL;
5174         bool job_signaled = false;
5175         struct amdgpu_hive_info *hive = NULL;
5176         struct amdgpu_device *tmp_adev = NULL;
5177         int i, r = 0;
5178         bool need_emergency_restart = false;
5179         bool audio_suspended = false;
5180         int tmp_vram_lost_counter;
5181         bool gpu_reset_for_dev_remove = false;
5182
5183         gpu_reset_for_dev_remove =
5184                         test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5185                                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5186
5187         /*
5188          * Special case: RAS triggered and full reset isn't supported
5189          */
5190         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5191
5192         /*
5193          * Flush RAM to disk so that after reboot
5194          * the user can read log and see why the system rebooted.
5195          */
5196         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5197                 DRM_WARN("Emergency reboot.");
5198
5199                 ksys_sync_helper();
5200                 emergency_restart();
5201         }
5202
5203         dev_info(adev->dev, "GPU %s begin!\n",
5204                 need_emergency_restart ? "jobs stop":"reset");
5205
5206         if (!amdgpu_sriov_vf(adev))
5207                 hive = amdgpu_get_xgmi_hive(adev);
5208         if (hive)
5209                 mutex_lock(&hive->hive_lock);
5210
5211         reset_context->job = job;
5212         reset_context->hive = hive;
5213
5214         /*
5215          * Build list of devices to reset.
5216          * In case we are in XGMI hive mode, re-order the device list
5217          * to put adev in the first position.
5218          */
5219         INIT_LIST_HEAD(&device_list);
5220         if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5221                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5222                         list_add_tail(&tmp_adev->reset_list, &device_list);
5223                         if (gpu_reset_for_dev_remove && adev->shutdown)
5224                                 tmp_adev->shutdown = true;
5225                 }
5226                 if (!list_is_first(&adev->reset_list, &device_list))
5227                         list_rotate_to_front(&adev->reset_list, &device_list);
5228                 device_list_handle = &device_list;
5229         } else {
5230                 list_add_tail(&adev->reset_list, &device_list);
5231                 device_list_handle = &device_list;
5232         }
5233
5234         /* We need to lock reset domain only once both for XGMI and single device */
5235         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5236                                     reset_list);
5237         amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5238
5239         /* block all schedulers and reset given job's ring */
5240         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5241
5242                 amdgpu_device_set_mp1_state(tmp_adev);
5243
5244                 /*
5245                  * Try to put the audio codec into suspend state
5246                  * before the gpu reset starts.
5247                  *
5248                  * This is needed because the graphics device shares
5249                  * its power domain with the AZ (audio) power domain.
5250                  * Without this, we may change the audio hardware from
5251                  * behind the audio driver's back, which triggers
5252                  * audio codec errors.
5253                  */
5254                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5255                         audio_suspended = true;
5256
5257                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5258
5259                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5260
5261                 if (!amdgpu_sriov_vf(tmp_adev))
5262                         amdgpu_amdkfd_pre_reset(tmp_adev);
5263
5264                 /*
5265                  * Mark these ASICs to be reset as untracked first,
5266                  * and add them back after the reset has completed.
5267                  */
5268                 amdgpu_unregister_gpu_instance(tmp_adev);
5269
5270                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5271
5272                 /* disable ras on ALL IPs */
5273                 if (!need_emergency_restart &&
5274                       amdgpu_device_ip_need_full_reset(tmp_adev))
5275                         amdgpu_ras_suspend(tmp_adev);
5276
5277                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5278                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5279
5280                         if (!ring || !ring->sched.thread)
5281                                 continue;
5282
5283                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5284
5285                         if (need_emergency_restart)
5286                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5287                 }
5288                 atomic_inc(&tmp_adev->gpu_reset_counter);
5289         }
5290
5291         if (need_emergency_restart)
5292                 goto skip_sched_resume;
5293
5294         /*
5295          * Must check guilty signal here since after this point all old
5296          * HW fences are force signaled.
5297          *
5298          * job->base holds a reference to parent fence
5299          */
5300         if (job && dma_fence_is_signaled(&job->hw_fence)) {
5301                 job_signaled = true;
5302                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5303                 goto skip_hw_reset;
5304         }
5305
5306 retry:  /* Rest of adevs pre asic reset from XGMI hive. */
5307         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5308                 if (gpu_reset_for_dev_remove) {
5309                         /* Workaround for ASICs that need to disable the SMC first */
5310                         amdgpu_device_smu_fini_early(tmp_adev);
5311                 }
5312                 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5313                 /* TODO: Should we stop? */
5314                 if (r) {
5315                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5316                                   r, adev_to_drm(tmp_adev)->unique);
5317                         tmp_adev->asic_reset_res = r;
5318                 }
5319
5320                 /*
5321                  * Drop all pending non scheduler resets. Scheduler resets
5322                  * were already dropped during drm_sched_stop
5323                  */
5324                 amdgpu_device_stop_pending_resets(tmp_adev);
5325         }
5326
5327         tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
5328         /* Actual ASIC resets if needed.*/
5329         /* Host driver will handle XGMI hive reset for SRIOV */
5330         if (amdgpu_sriov_vf(adev)) {
5331                 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5332                 if (r)
5333                         adev->asic_reset_res = r;
5334
5335                 /* Aldebaran supports RAS in SRIOV, so we need to resume RAS during reset */
5336                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
5337                         amdgpu_ras_resume(adev);
5338         } else {
5339                 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5340                 if (r && r == -EAGAIN) {
5341                         set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags);
5342                         adev->asic_reset_res = 0;
5343                         goto retry;
5344                 }
5345
5346                 if (!r && gpu_reset_for_dev_remove)
5347                         goto recover_end;
5348         }
5349
5350 skip_hw_reset:
5351
5352         /* Post ASIC reset for all devs .*/
5353         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5354
5355                 /*
5356                  * Sometimes a later bad compute job can block a good gfx job because the
5357                  * gfx and compute rings share internal GC hardware. We add an additional
5358                  * guilty-job recheck step to find the real guilty job: it synchronously
5359                  * resubmits and waits for the first job to signal. If that wait times out,
5360                  * we identify the job as the real guilty one.
5361                  */
5362                 if (amdgpu_gpu_recovery == 2 &&
5363                         !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5364                         amdgpu_device_recheck_guilty_jobs(
5365                                 tmp_adev, device_list_handle, reset_context);
5366
5367                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5368                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5369
5370                         if (!ring || !ring->sched.thread)
5371                                 continue;
5372
5373                         /* No point in resubmitting jobs if we didn't do a HW reset */
5374                         if (!tmp_adev->asic_reset_res && !job_signaled)
5375                                 drm_sched_resubmit_jobs(&ring->sched);
5376
5377                         drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5378                 }
5379
5380                 if (adev->enable_mes)
5381                         amdgpu_mes_self_test(tmp_adev);
5382
5383                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5384                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5385                 }
5386
5387                 if (tmp_adev->asic_reset_res)
5388                         r = tmp_adev->asic_reset_res;
5389
5390                 tmp_adev->asic_reset_res = 0;
5391
5392                 if (r) {
5393                         /* bad news, how to tell it to userspace ? */
5394                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5395                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5396                 } else {
5397                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5398                         if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5399                                 DRM_WARN("smart shift update failed\n");
5400                 }
5401         }
5402
5403 skip_sched_resume:
5404         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5405                 /* unlock kfd: SRIOV would do it separately */
5406                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5407                         amdgpu_amdkfd_post_reset(tmp_adev);
5408
5409                 /* kfd_post_reset will do nothing if the kfd device is not initialized,
5410                  * so bring up kfd here if it was not initialized before
5411                  */
5412                 if (!adev->kfd.init_complete)
5413                         amdgpu_amdkfd_device_init(adev);
5414
5415                 if (audio_suspended)
5416                         amdgpu_device_resume_display_audio(tmp_adev);
5417
5418                 amdgpu_device_unset_mp1_state(tmp_adev);
5419         }
5420
5421 recover_end:
5422         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5423                                             reset_list);
5424         amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5425
5426         if (hive) {
5427                 mutex_unlock(&hive->hive_lock);
5428                 amdgpu_put_xgmi_hive(hive);
5429         }
5430
5431         if (r)
5432                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5433
5434         atomic_set(&adev->reset_domain->reset_res, r);
5435         return r;
5436 }
5437
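/*
 * Editor's illustrative sketch (not part of the original file): a
 * simplified view of how a job-timeout path might drive the recovery
 * entry point above.  It consults amdgpu_device_should_recover_gpu()
 * first and then fills in an amdgpu_reset_context before calling
 * amdgpu_device_gpu_recover().  This is a hedged sketch of the calling
 * convention, not the driver's actual timeout handler.
 */
#if 0	/* illustrative only */
static void example_handle_job_timeout(struct amdgpu_device *adev,
                                       struct amdgpu_job *job)
{
        struct amdgpu_reset_context reset_context;

        if (!amdgpu_device_should_recover_gpu(adev))
                return;	/* recovery disabled or no real hang detected */

        memset(&reset_context, 0, sizeof(reset_context));
        reset_context.method = AMD_RESET_METHOD_NONE;	/* let the driver choose */
        reset_context.reset_req_dev = adev;
        reset_context.job = job;
        clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

        amdgpu_device_gpu_recover(adev, job, &reset_context);
}
#endif
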
5438 /**
5439  * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
5440  *
5441  * @adev: amdgpu_device pointer
5442  *
5443  * Fetches and stores in the driver the PCIe capabilities (gen speed
5444  * and lanes) of the slot the device is in. Handles APUs and
5445  * virtualized environments where PCIe config space may not be available.
5446  */
5447 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5448 {
5449         struct pci_dev *pdev;
5450         enum pci_bus_speed speed_cap, platform_speed_cap;
5451         enum pcie_link_width platform_link_width;
5452
5453         if (amdgpu_pcie_gen_cap)
5454                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5455
5456         if (amdgpu_pcie_lane_cap)
5457                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5458
5459         /* covers APUs as well */
5460         if (pci_is_root_bus(adev->pdev->bus)) {
5461                 if (adev->pm.pcie_gen_mask == 0)
5462                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5463                 if (adev->pm.pcie_mlw_mask == 0)
5464                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5465                 return;
5466         }
5467
5468         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5469                 return;
5470
5471         pcie_bandwidth_available(adev->pdev, NULL,
5472                                  &platform_speed_cap, &platform_link_width);
5473
5474         if (adev->pm.pcie_gen_mask == 0) {
5475                 /* asic caps */
5476                 pdev = adev->pdev;
5477                 speed_cap = pcie_get_speed_cap(pdev);
5478                 if (speed_cap == PCI_SPEED_UNKNOWN) {
5479                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5480                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5481                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5482                 } else {
5483                         if (speed_cap == PCIE_SPEED_32_0GT)
5484                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5485                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5486                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5487                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5488                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5489                         else if (speed_cap == PCIE_SPEED_16_0GT)
5490                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5491                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5492                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5493                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5494                         else if (speed_cap == PCIE_SPEED_8_0GT)
5495                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5496                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5497                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5498                         else if (speed_cap == PCIE_SPEED_5_0GT)
5499                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5500                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5501                         else
5502                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5503                 }
5504                 /* platform caps */
5505                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5506                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5507                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5508                 } else {
5509                         if (platform_speed_cap == PCIE_SPEED_32_0GT)
5510                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5511                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5512                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5513                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5514                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5515                         else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5516                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5517                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5518                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5519                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5520                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5521                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5522                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5523                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5524                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5525                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5526                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5527                         else
5528                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5529
5530                 }
5531         }
5532         if (adev->pm.pcie_mlw_mask == 0) {
5533                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5534                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5535                 } else {
5536                         switch (platform_link_width) {
5537                         case PCIE_LNK_X32:
5538                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5539                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5540                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5541                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5542                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5543                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5544                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5545                                 break;
5546                         case PCIE_LNK_X16:
5547                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5548                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5549                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5550                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5551                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5552                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5553                                 break;
5554                         case PCIE_LNK_X12:
5555                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5556                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5557                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5558                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5559                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5560                                 break;
5561                         case PCIE_LNK_X8:
5562                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5563                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5564                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5565                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5566                                 break;
5567                         case PCIE_LNK_X4:
5568                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5569                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5570                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5571                                 break;
5572                         case PCIE_LNK_X2:
5573                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5574                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5575                                 break;
5576                         case PCIE_LNK_X1:
5577                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5578                                 break;
5579                         default:
5580                                 break;
5581                         }
5582                 }
5583         }
5584 }
5585
5586 /**
5587  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5588  *
5589  * @adev: amdgpu_device pointer
5590  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5591  *
5592  * Return true if @peer_adev can access (DMA) @adev through the PCIe
5593  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5594  * @peer_adev.
5595  */
5596 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5597                                       struct amdgpu_device *peer_adev)
5598 {
5599 #ifdef CONFIG_HSA_AMD_P2P
5600         uint64_t address_mask = peer_adev->dev->dma_mask ?
5601                 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5602         resource_size_t aper_limit =
5603                 adev->gmc.aper_base + adev->gmc.aper_size - 1;
5604         bool p2p_access =
5605                 !adev->gmc.xgmi.connected_to_cpu &&
5606                 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5607
5608         return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5609                 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5610                 !(adev->gmc.aper_base & address_mask ||
5611                   aper_limit & address_mask));
5612 #else
5613         return false;
5614 #endif
5615 }
5616
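/**
 * amdgpu_device_baco_enter - enter the BACO (Bus Active, Chip Off) state
 *
 * @dev: drm_device pointer
 *
 * Descriptive comment (not in the original source): put the device into the
 * BACO low power state via the DPM interface. When RAS is enabled, the NBIO
 * doorbell interrupt is disabled first.
 *
 * Return: -ENOTSUPP if the device does not support BACO, otherwise the
 * result of amdgpu_dpm_baco_enter().
 */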
5617 int amdgpu_device_baco_enter(struct drm_device *dev)
5618 {
5619         struct amdgpu_device *adev = drm_to_adev(dev);
5620         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5621
5622         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5623                 return -ENOTSUPP;
5624
5625         if (ras && adev->ras_enabled &&
5626             adev->nbio.funcs->enable_doorbell_interrupt)
5627                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5628
5629         return amdgpu_dpm_baco_enter(adev);
5630 }
5631
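/**
 * amdgpu_device_baco_exit - exit the BACO (Bus Active, Chip Off) state
 *
 * @dev: drm_device pointer
 *
 * Descriptive comment (not in the original source): bring the device back
 * out of BACO via the DPM interface, then re-enable the NBIO doorbell
 * interrupt when RAS is enabled and clear any pending doorbell interrupt in
 * the passthrough case.
 *
 * Return: 0 on success, -ENOTSUPP if the device does not support BACO, or a
 * negative error code from amdgpu_dpm_baco_exit().
 */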
5632 int amdgpu_device_baco_exit(struct drm_device *dev)
5633 {
5634         struct amdgpu_device *adev = drm_to_adev(dev);
5635         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5636         int ret = 0;
5637
5638         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5639                 return -ENOTSUPP;
5640
5641         ret = amdgpu_dpm_baco_exit(adev);
5642         if (ret)
5643                 return ret;
5644
5645         if (ras && adev->ras_enabled &&
5646             adev->nbio.funcs->enable_doorbell_interrupt)
5647                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5648
5649         if (amdgpu_passthrough(adev) &&
5650             adev->nbio.funcs->clear_doorbell_interrupt)
5651                 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5652
5653         return 0;
5654 }
5655
5656 /**
5657  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5658  * @pdev: PCI device struct
5659  * @state: PCI channel state
5660  *
5661  * Description: Called when a PCI error is detected.
5662  *
5663  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5664  */
5665 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5666 {
5667         struct drm_device *dev = pci_get_drvdata(pdev);
5668         struct amdgpu_device *adev = drm_to_adev(dev);
5669         int i;
5670
5671         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5672
5673         if (adev->gmc.xgmi.num_physical_nodes > 1) {
5674                 DRM_WARN("No support for XGMI hive yet...\n");
5675                 return PCI_ERS_RESULT_DISCONNECT;
5676         }
5677
5678         adev->pci_channel_state = state;
5679
5680         switch (state) {
5681         case pci_channel_io_normal:
5682                 return PCI_ERS_RESULT_CAN_RECOVER;
5683         /* Fatal error, prepare for slot reset */
5684         case pci_channel_io_frozen:
5685                 /*
5686                  * Locking adev->reset_domain->sem will prevent any external access
5687                  * to GPU during PCI error recovery
5688                  */
5689                 amdgpu_device_lock_reset_domain(adev->reset_domain);
5690                 amdgpu_device_set_mp1_state(adev);
5691
5692                 /*
5693                  * Block any work scheduling as we do for regular GPU reset
5694                  * for the duration of the recovery
5695                  */
5696                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5697                         struct amdgpu_ring *ring = adev->rings[i];
5698
5699                         if (!ring || !ring->sched.thread)
5700                                 continue;
5701
5702                         drm_sched_stop(&ring->sched, NULL);
5703                 }
5704                 atomic_inc(&adev->gpu_reset_counter);
5705                 return PCI_ERS_RESULT_NEED_RESET;
5706         case pci_channel_io_perm_failure:
5707                 /* Permanent error, prepare for device removal */
5708                 return PCI_ERS_RESULT_DISCONNECT;
5709         }
5710
5711         return PCI_ERS_RESULT_NEED_RESET;
5712 }
5713
5714 /**
5715  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5716  * @pdev: pointer to PCI device
5717  */
5718 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5719 {
5720
5721         DRM_INFO("PCI error: mmio enabled callback!!\n");
5722
5723         /* TODO - dump whatever for debugging purposes */
5724
5725         /* This is called only if amdgpu_pci_error_detected returns
5726          * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5727          * works, no need to reset slot.
5728          */
5729
5730         return PCI_ERS_RESULT_RECOVERED;
5731 }
5732
5733 /**
5734  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5735  * @pdev: PCI device struct
5736  *
5737  * Description: This routine is called by the pci error recovery
5738  * code after the PCI slot has been reset, just before we
5739  * should resume normal operations.
5740  */
5741 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5742 {
5743         struct drm_device *dev = pci_get_drvdata(pdev);
5744         struct amdgpu_device *adev = drm_to_adev(dev);
5745         int r, i;
5746         struct amdgpu_reset_context reset_context;
5747         u32 memsize;
5748         struct list_head device_list;
5749
5750         DRM_INFO("PCI error: slot reset callback!!\n");
5751
5752         memset(&reset_context, 0, sizeof(reset_context));
5753
5754         INIT_LIST_HEAD(&device_list);
5755         list_add_tail(&adev->reset_list, &device_list);
5756
5757         /* wait for asic to come out of reset */
5758         msleep(500);
5759
5760         /* Restore PCI confspace */
5761         amdgpu_device_load_pci_state(pdev);
5762
5763         /* confirm ASIC came out of reset */
5764         for (i = 0; i < adev->usec_timeout; i++) {
5765                 memsize = amdgpu_asic_get_config_memsize(adev);
5766
5767                 if (memsize != 0xffffffff)
5768                         break;
5769                 udelay(1);
5770         }
5771         if (memsize == 0xffffffff) {
5772                 r = -ETIME;
5773                 goto out;
5774         }
5775
5776         reset_context.method = AMD_RESET_METHOD_NONE;
5777         reset_context.reset_req_dev = adev;
5778         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5779         set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5780         set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
5781
5782         adev->no_hw_access = true;
5783         r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5784         adev->no_hw_access = false;
5785         if (r)
5786                 goto out;
5787
5788         r = amdgpu_do_asic_reset(&device_list, &reset_context);
5789
5790 out:
5791         if (!r) {
5792                 if (amdgpu_device_cache_pci_state(adev->pdev))
5793                         pci_restore_state(adev->pdev);
5794
5795                 DRM_INFO("PCIe error recovery succeeded\n");
5796         } else {
5797                 DRM_ERROR("PCIe error recovery failed, err:%d\n", r);
5798                 amdgpu_device_unset_mp1_state(adev);
5799                 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5800         }
5801
5802         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5803 }
5804
5805 /**
5806  * amdgpu_pci_resume() - resume normal ops after PCI reset
5807  * @pdev: pointer to PCI device
5808  *
5809  * Called when the error recovery driver tells us that it's
5810  * OK to resume normal operation.
5811  */
5812 void amdgpu_pci_resume(struct pci_dev *pdev)
5813 {
5814         struct drm_device *dev = pci_get_drvdata(pdev);
5815         struct amdgpu_device *adev = drm_to_adev(dev);
5816         int i;
5817
5818
5819         DRM_INFO("PCI error: resume callback!!\n");
5820
5821         /* Only continue execution for the case of pci_channel_io_frozen */
5822         if (adev->pci_channel_state != pci_channel_io_frozen)
5823                 return;
5824
5825         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5826                 struct amdgpu_ring *ring = adev->rings[i];
5827
5828                 if (!ring || !ring->sched.thread)
5829                         continue;
5830
5831
5832                 drm_sched_resubmit_jobs(&ring->sched);
5833                 drm_sched_start(&ring->sched, true);
5834         }
5835
5836         amdgpu_device_unset_mp1_state(adev);
5837         amdgpu_device_unlock_reset_domain(adev->reset_domain);
5838 }
5839
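/**
 * amdgpu_device_cache_pci_state - cache the PCI config space of the device
 *
 * @pdev: PCI device struct
 *
 * Descriptive comment (not in the original source): save the current PCI
 * configuration space and keep a copy in adev->pci_state so it can later be
 * restored with amdgpu_device_load_pci_state(). Any previously cached state
 * is freed first.
 *
 * Return: true on success, false if saving or storing the state failed.
 */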
5840 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5841 {
5842         struct drm_device *dev = pci_get_drvdata(pdev);
5843         struct amdgpu_device *adev = drm_to_adev(dev);
5844         int r;
5845
5846         r = pci_save_state(pdev);
5847         if (!r) {
5848                 kfree(adev->pci_state);
5849
5850                 adev->pci_state = pci_store_saved_state(pdev);
5851
5852                 if (!adev->pci_state) {
5853                         DRM_ERROR("Failed to store PCI saved state");
5854                         return false;
5855                 }
5856         } else {
5857                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5858                 return false;
5859         }
5860
5861         return true;
5862 }
5863
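/**
 * amdgpu_device_load_pci_state - restore the cached PCI config space
 *
 * @pdev: PCI device struct
 *
 * Descriptive comment (not in the original source): load the configuration
 * space previously cached by amdgpu_device_cache_pci_state() and write it
 * back to the device.
 *
 * Return: true on success, false if no state was cached or loading failed.
 */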
5864 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5865 {
5866         struct drm_device *dev = pci_get_drvdata(pdev);
5867         struct amdgpu_device *adev = drm_to_adev(dev);
5868         int r;
5869
5870         if (!adev->pci_state)
5871                 return false;
5872
5873         r = pci_load_saved_state(pdev, adev->pci_state);
5874
5875         if (!r) {
5876                 pci_restore_state(pdev);
5877         } else {
5878                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5879                 return false;
5880         }
5881
5882         return true;
5883 }
5884
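/**
 * amdgpu_device_flush_hdp - flush the HDP (Host Data Path) cache
 *
 * @adev: amdgpu_device pointer
 * @ring: optional ring to emit the flush on, may be NULL
 *
 * Descriptive comment (not in the original source): flush the HDP cache so
 * that host-side writes to VRAM become visible to the GPU. The flush is
 * emitted on @ring when it supports an HDP flush packet, otherwise it goes
 * through the ASIC callback. Skipped on x86-64 for bare-metal APUs and for
 * devices whose framebuffer is directly connected to the CPU.
 */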
5885 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5886                 struct amdgpu_ring *ring)
5887 {
5888 #ifdef CONFIG_X86_64
5889         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5890                 return;
5891 #endif
5892         if (adev->gmc.xgmi.connected_to_cpu)
5893                 return;
5894
5895         if (ring && ring->funcs->emit_hdp_flush)
5896                 amdgpu_ring_emit_hdp_flush(ring);
5897         else
5898                 amdgpu_asic_flush_hdp(adev, ring);
5899 }
5900
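/**
 * amdgpu_device_invalidate_hdp - invalidate the HDP (Host Data Path) cache
 *
 * @adev: amdgpu_device pointer
 * @ring: ring the invalidation is associated with, may be NULL
 *
 * Descriptive comment (not in the original source): invalidate the HDP cache
 * through the ASIC callback so that host reads from VRAM do not return stale
 * data. Skipped on x86-64 for bare-metal APUs and for devices whose
 * framebuffer is directly connected to the CPU.
 */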
5901 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5902                 struct amdgpu_ring *ring)
5903 {
5904 #ifdef CONFIG_X86_64
5905         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5906                 return;
5907 #endif
5908         if (adev->gmc.xgmi.connected_to_cpu)
5909                 return;
5910
5911         amdgpu_asic_invalidate_hdp(adev, ring);
5912 }
5913
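/**
 * amdgpu_in_reset - check whether the GPU is currently in reset
 *
 * @adev: amdgpu_device pointer
 *
 * Descriptive comment (not in the original source).
 *
 * Return: non-zero when a GPU reset is in progress in the reset domain this
 * device belongs to, zero otherwise.
 */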
5914 int amdgpu_in_reset(struct amdgpu_device *adev)
5915 {
5916         return atomic_read(&adev->reset_domain->in_gpu_reset);
5917 }
5918
5919 /**
5920  * amdgpu_device_halt() - bring hardware to some kind of halt state
5921  *
5922  * @adev: amdgpu_device pointer
5923  *
5924  * Bring hardware to some kind of halt state so that no one can touch it
5925  * any more. It will help to maintain error context when error occurred.
5926  * Compare to a simple hang, the system will keep stable at least for SSH
5927  * access. Then it should be trivial to inspect the hardware state and
5928  * see what's going on. Implemented as following:
5929  *
5930  * 1. drm_dev_unplug() makes device inaccessible to user space(IOCTLs, etc),
5931  *    clears all CPU mappings to device, disallows remappings through page faults
5932  * 2. amdgpu_irq_disable_all() disables all interrupts
5933  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5934  * 4. set adev->no_hw_access to avoid potential crashes after step 5
5935  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5936  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5937  *    flush any in flight DMA operations
5938  */
5939 void amdgpu_device_halt(struct amdgpu_device *adev)
5940 {
5941         struct pci_dev *pdev = adev->pdev;
5942         struct drm_device *ddev = adev_to_drm(adev);
5943
5944         drm_dev_unplug(ddev);
5945
5946         amdgpu_irq_disable_all(adev);
5947
5948         amdgpu_fence_driver_hw_fini(adev);
5949
5950         adev->no_hw_access = true;
5951
5952         amdgpu_device_unmap_mmio(adev);
5953
5954         pci_disable_device(pdev);
5955         pci_wait_for_pending_transaction(pdev);
5956 }
5957
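/**
 * amdgpu_device_pcie_port_rreg - read a PCIe port register
 *
 * @adev: amdgpu_device pointer
 * @reg: PCIe port register offset (in dwords)
 *
 * Descriptive comment (not in the original source): read a PCIe port
 * register through the NBIO index/data pair while holding pcie_idx_lock so
 * the index and data accesses cannot be interleaved with another caller.
 *
 * Return: the 32-bit register value.
 */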
5958 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5959                                 u32 reg)
5960 {
5961         unsigned long flags, address, data;
5962         u32 r;
5963
5964         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5965         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5966
5967         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5968         WREG32(address, reg * 4);
5969         (void)RREG32(address);
5970         r = RREG32(data);
5971         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5972         return r;
5973 }
5974
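/**
 * amdgpu_device_pcie_port_wreg - write a PCIe port register
 *
 * @adev: amdgpu_device pointer
 * @reg: PCIe port register offset (in dwords)
 * @v: value to write
 *
 * Descriptive comment (not in the original source): write a PCIe port
 * register through the NBIO index/data pair under pcie_idx_lock. The index
 * and data registers are read back after each write to flush the posted
 * writes.
 */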
5975 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
5976                                 u32 reg, u32 v)
5977 {
5978         unsigned long flags, address, data;
5979
5980         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5981         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5982
5983         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5984         WREG32(address, reg * 4);
5985         (void)RREG32(address);
5986         WREG32(data, v);
5987         (void)RREG32(data);
5988         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5989 }
5990
5991 /**
5992  * amdgpu_device_switch_gang - switch to a new gang
5993  * @adev: amdgpu_device pointer
5994  * @gang: the gang to switch to
5995  *
5996  * Try to switch to a new gang.
5997  * Returns: NULL if we switched to the new gang, or a reference to the current
5998  * gang leader if it has not signaled yet and must be waited for first.
5999  */
6000 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6001                                             struct dma_fence *gang)
6002 {
6003         struct dma_fence *old = NULL;
6004
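        /*
         * Descriptive comment (not in the original source): lock-less update.
         * Re-read the current gang leader under RCU and only install the new
         * fence with cmpxchg() once the previous one has signaled; otherwise
         * hand the old fence back to the caller so it can wait for it first.
         */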
6005         do {
6006                 dma_fence_put(old);
6007                 rcu_read_lock();
6008                 old = dma_fence_get_rcu_safe(&adev->gang_submit);
6009                 rcu_read_unlock();
6010
6011                 if (old == gang)
6012                         break;
6013
6014                 if (!dma_fence_is_signaled(old))
6015                         return old;
6016
6017         } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6018                          old, gang) != old);
6019
6020         dma_fence_put(old);
6021         return NULL;
6022 }