linux.git: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
35 #include <linux/devcoredump.h>
36 #include <generated/utsrelease.h>
37 #include <linux/pci-p2pdma.h>
38
39 #include <drm/drm_atomic_helper.h>
40 #include <drm/drm_probe_helper.h>
41 #include <drm/amdgpu_drm.h>
42 #include <linux/vgaarb.h>
43 #include <linux/vga_switcheroo.h>
44 #include <linux/efi.h>
45 #include "amdgpu.h"
46 #include "amdgpu_trace.h"
47 #include "amdgpu_i2c.h"
48 #include "atom.h"
49 #include "amdgpu_atombios.h"
50 #include "amdgpu_atomfirmware.h"
51 #include "amd_pcie.h"
52 #ifdef CONFIG_DRM_AMDGPU_SI
53 #include "si.h"
54 #endif
55 #ifdef CONFIG_DRM_AMDGPU_CIK
56 #include "cik.h"
57 #endif
58 #include "vi.h"
59 #include "soc15.h"
60 #include "nv.h"
61 #include "bif/bif_4_1_d.h"
62 #include <linux/firmware.h>
63 #include "amdgpu_vf_error.h"
64
65 #include "amdgpu_amdkfd.h"
66 #include "amdgpu_pm.h"
67
68 #include "amdgpu_xgmi.h"
69 #include "amdgpu_ras.h"
70 #include "amdgpu_pmu.h"
71 #include "amdgpu_fru_eeprom.h"
72 #include "amdgpu_reset.h"
73
74 #include <linux/suspend.h>
75 #include <drm/task_barrier.h>
76 #include <linux/pm_runtime.h>
77
78 #include <drm/drm_drv.h>
79
80 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
87
88 #define AMDGPU_RESUME_MS                2000
89 #define AMDGPU_MAX_RETRY_LIMIT          2
90 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
91
92 const char *amdgpu_asic_name[] = {
93         "TAHITI",
94         "PITCAIRN",
95         "VERDE",
96         "OLAND",
97         "HAINAN",
98         "BONAIRE",
99         "KAVERI",
100         "KABINI",
101         "HAWAII",
102         "MULLINS",
103         "TOPAZ",
104         "TONGA",
105         "FIJI",
106         "CARRIZO",
107         "STONEY",
108         "POLARIS10",
109         "POLARIS11",
110         "POLARIS12",
111         "VEGAM",
112         "VEGA10",
113         "VEGA12",
114         "VEGA20",
115         "RAVEN",
116         "ARCTURUS",
117         "RENOIR",
118         "ALDEBARAN",
119         "NAVI10",
120         "CYAN_SKILLFISH",
121         "NAVI14",
122         "NAVI12",
123         "SIENNA_CICHLID",
124         "NAVY_FLOUNDER",
125         "VANGOGH",
126         "DIMGREY_CAVEFISH",
127         "BEIGE_GOBY",
128         "YELLOW_CARP",
129         "IP DISCOVERY",
130         "LAST",
131 };
132
133 /**
134  * DOC: pcie_replay_count
135  *
136  * The amdgpu driver provides a sysfs API for reporting the total number
137  * of PCIe replays (NAKs).
138  * The file pcie_replay_count is used for this and returns the total
139  * number of replays as a sum of the NAKs generated and NAKs received.
140  */
141
142 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
143                 struct device_attribute *attr, char *buf)
144 {
145         struct drm_device *ddev = dev_get_drvdata(dev);
146         struct amdgpu_device *adev = drm_to_adev(ddev);
147         uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
148
149         return sysfs_emit(buf, "%llu\n", cnt);
150 }
151
152 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
153                 amdgpu_device_get_pcie_replay_count, NULL);
154
155 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
156
157 /**
158  * DOC: product_name
159  *
160  * The amdgpu driver provides a sysfs API for reporting the product name
161  * for the device.
162  * The file product_name is used for this and returns the product name
163  * as returned from the FRU.
164  * NOTE: This is only available for certain server cards
165  */
166
167 static ssize_t amdgpu_device_get_product_name(struct device *dev,
168                 struct device_attribute *attr, char *buf)
169 {
170         struct drm_device *ddev = dev_get_drvdata(dev);
171         struct amdgpu_device *adev = drm_to_adev(ddev);
172
173         return sysfs_emit(buf, "%s\n", adev->product_name);
174 }
175
176 static DEVICE_ATTR(product_name, S_IRUGO,
177                 amdgpu_device_get_product_name, NULL);
178
179 /**
180  * DOC: product_number
181  *
182  * The amdgpu driver provides a sysfs API for reporting the part number
183  * for the device.
184  * The file product_number is used for this and returns the part number
185  * as returned from the FRU.
186  * NOTE: This is only available for certain server cards
187  */
188
189 static ssize_t amdgpu_device_get_product_number(struct device *dev,
190                 struct device_attribute *attr, char *buf)
191 {
192         struct drm_device *ddev = dev_get_drvdata(dev);
193         struct amdgpu_device *adev = drm_to_adev(ddev);
194
195         return sysfs_emit(buf, "%s\n", adev->product_number);
196 }
197
198 static DEVICE_ATTR(product_number, S_IRUGO,
199                 amdgpu_device_get_product_number, NULL);
200
201 /**
202  * DOC: serial_number
203  *
204  * The amdgpu driver provides a sysfs API for reporting the serial number
205  * for the device.
206  * The file serial_number is used for this and returns the serial number
207  * as returned from the FRU.
208  * NOTE: This is only available for certain server cards
209  */
210
211 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
212                 struct device_attribute *attr, char *buf)
213 {
214         struct drm_device *ddev = dev_get_drvdata(dev);
215         struct amdgpu_device *adev = drm_to_adev(ddev);
216
217         return sysfs_emit(buf, "%s\n", adev->serial);
218 }
219
220 static DEVICE_ATTR(serial_number, S_IRUGO,
221                 amdgpu_device_get_serial_number, NULL);
222
223 /**
224  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
225  *
226  * @dev: drm_device pointer
227  *
228  * Returns true if the device is a dGPU with ATPX power control,
229  * otherwise returns false.
230  */
231 bool amdgpu_device_supports_px(struct drm_device *dev)
232 {
233         struct amdgpu_device *adev = drm_to_adev(dev);
234
235         if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
236                 return true;
237         return false;
238 }
239
240 /**
241  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
242  *
243  * @dev: drm_device pointer
244  *
245  * Returns true if the device is a dGPU with ACPI power control,
246  * otherwise returns false.
247  */
248 bool amdgpu_device_supports_boco(struct drm_device *dev)
249 {
250         struct amdgpu_device *adev = drm_to_adev(dev);
251
252         if (adev->has_pr3 ||
253             ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
254                 return true;
255         return false;
256 }
257
258 /**
259  * amdgpu_device_supports_baco - Does the device support BACO
260  *
261  * @dev: drm_device pointer
262  *
263  * Returns true if the device supports BACO,
264  * otherwise returns false.
265  */
266 bool amdgpu_device_supports_baco(struct drm_device *dev)
267 {
268         struct amdgpu_device *adev = drm_to_adev(dev);
269
270         return amdgpu_asic_supports_baco(adev);
271 }
272
273 /**
274  * amdgpu_device_supports_smart_shift - Is the device a dGPU with
275  * smart shift support
276  *
277  * @dev: drm_device pointer
278  *
279  * Returns true if the device is a dGPU with Smart Shift support,
280  * otherwise returns false.
281  */
282 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
283 {
284         return (amdgpu_device_supports_boco(dev) &&
285                 amdgpu_acpi_is_power_shift_control_supported());
286 }
287
288 /*
289  * VRAM access helper functions
290  */
291
292 /**
293  * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
294  *
295  * @adev: amdgpu_device pointer
296  * @pos: offset of the buffer in vram
297  * @buf: virtual address of the buffer in system memory
298  * @size: read/write size; @buf must be at least @size bytes
299  * @write: true - write to vram, otherwise - read from vram
300  */
301 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
302                              void *buf, size_t size, bool write)
303 {
304         unsigned long flags;
305         uint32_t hi = ~0, tmp = 0;
306         uint32_t *data = buf;
307         uint64_t last;
308         int idx;
309
310         if (!drm_dev_enter(adev_to_drm(adev), &idx))
311                 return;
312
313         BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
314
315         spin_lock_irqsave(&adev->mmio_idx_lock, flags);
316         for (last = pos + size; pos < last; pos += 4) {
317                 tmp = pos >> 31;
318
319                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
320                 if (tmp != hi) {
321                         WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
322                         hi = tmp;
323                 }
324                 if (write)
325                         WREG32_NO_KIQ(mmMM_DATA, *data++);
326                 else
327                         *data++ = RREG32_NO_KIQ(mmMM_DATA);
328         }
329
330         spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
331         drm_dev_exit(idx);
332 }
333
334 /**
335  * amdgpu_device_aper_access - access vram by vram aperture
336  *
337  * @adev: amdgpu_device pointer
338  * @pos: offset of the buffer in vram
339  * @buf: virtual address of the buffer in system memory
340  * @size: read/write size; @buf must be at least @size bytes
341  * @write: true - write to vram, otherwise - read from vram
342  *
343  * Returns the number of bytes transferred.
344  */
345 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
346                                  void *buf, size_t size, bool write)
347 {
348 #ifdef CONFIG_64BIT
349         void __iomem *addr;
350         size_t count = 0;
351         uint64_t last;
352
353         if (!adev->mman.aper_base_kaddr)
354                 return 0;
355
356         last = min(pos + size, adev->gmc.visible_vram_size);
357         if (last > pos) {
358                 addr = adev->mman.aper_base_kaddr + pos;
359                 count = last - pos;
360
361                 if (write) {
362                         memcpy_toio(addr, buf, count);
363                         mb();
364                         amdgpu_device_flush_hdp(adev, NULL);
365                 } else {
366                         amdgpu_device_invalidate_hdp(adev, NULL);
367                         mb();
368                         memcpy_fromio(buf, addr, count);
369                 }
370
371         }
372
373         return count;
374 #else
375         return 0;
376 #endif
377 }
378
379 /**
380  * amdgpu_device_vram_access - read/write a buffer in vram
381  *
382  * @adev: amdgpu_device pointer
383  * @pos: offset of the buffer in vram
384  * @buf: virtual address of the buffer in system memory
385  * @size: read/write size; @buf must be at least @size bytes
386  * @write: true - write to vram, otherwise - read from vram
387  */
388 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
389                                void *buf, size_t size, bool write)
390 {
391         size_t count;
392
393         /* try using the vram aperture to access vram first */
394         count = amdgpu_device_aper_access(adev, pos, buf, size, write);
395         size -= count;
396         if (size) {
397                 /* use MM to access the rest of vram */
398                 pos += count;
399                 buf += count;
400                 amdgpu_device_mm_access(adev, pos, buf, size, write);
401         }
402 }
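
/*
 * Illustrative usage sketch (editor's addition, not part of the driver):
 * read a small buffer from the start of VRAM. The helper first tries the
 * CPU-visible aperture and falls back to MM_INDEX/MM_DATA for any range
 * the aperture does not cover. Offsets and sizes must be dword aligned.
 *
 *	u32 data[16];
 *
 *	amdgpu_device_vram_access(adev, 0, data, sizeof(data), false);
 */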
403
404 /*
405  * register access helper functions.
406  */
407
408 /* Check if hw access should be skipped because of hotplug or device error */
409 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
410 {
411         if (adev->no_hw_access)
412                 return true;
413
414 #ifdef CONFIG_LOCKDEP
415         /*
416          * This is a bit complicated to understand, so worth a comment. What we assert
417          * here is that the GPU reset is not running on another thread in parallel.
418          *
419          * For this we trylock the read side of the reset semaphore; if that succeeds
420          * we know that the reset is not running in parallel.
421          *
422          * If the trylock fails we assert that we are either already holding the read
423          * side of the lock or are the reset thread itself and hold the write side of
424          * the lock.
425          */
426         if (in_task()) {
427                 if (down_read_trylock(&adev->reset_domain->sem))
428                         up_read(&adev->reset_domain->sem);
429                 else
430                         lockdep_assert_held(&adev->reset_domain->sem);
431         }
432 #endif
433         return false;
434 }
435
436 /**
437  * amdgpu_device_rreg - read a memory mapped IO or indirect register
438  *
439  * @adev: amdgpu_device pointer
440  * @reg: dword aligned register offset
441  * @acc_flags: access flags which require special behavior
442  *
443  * Returns the 32 bit value from the offset specified.
444  */
445 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
446                             uint32_t reg, uint32_t acc_flags)
447 {
448         uint32_t ret;
449
450         if (amdgpu_device_skip_hw_access(adev))
451                 return 0;
452
453         if ((reg * 4) < adev->rmmio_size) {
454                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
455                     amdgpu_sriov_runtime(adev) &&
456                     down_read_trylock(&adev->reset_domain->sem)) {
457                         ret = amdgpu_kiq_rreg(adev, reg);
458                         up_read(&adev->reset_domain->sem);
459                 } else {
460                         ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
461                 }
462         } else {
463                 ret = adev->pcie_rreg(adev, reg * 4);
464         }
465
466         trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
467
468         return ret;
469 }
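
/*
 * Illustrative sketch (editor's addition): most register reads in the driver
 * go through wrapper macros rather than calling this helper directly, e.g.
 * (assuming the usual RREG32()/RREG32_NO_KIQ() definitions in amdgpu.h,
 * not shown in this file):
 *
 *	val = amdgpu_device_rreg(adev, reg, 0);
 *	val = amdgpu_device_rreg(adev, reg, AMDGPU_REGS_NO_KIQ); // skip the KIQ path
 */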
470
471 /*
472  * MMIO register read with byte offset helper function
473  * @offset: byte offset from MMIO start
474  *
475  */
476
477 /**
478  * amdgpu_mm_rreg8 - read a memory mapped IO register
479  *
480  * @adev: amdgpu_device pointer
481  * @offset: byte aligned register offset
482  *
483  * Returns the 8 bit value from the offset specified.
484  */
485 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
486 {
487         if (amdgpu_device_skip_hw_access(adev))
488                 return 0;
489
490         if (offset < adev->rmmio_size)
491                 return (readb(adev->rmmio + offset));
492         BUG();
493 }
494
495 /*
496  * MMIO register write with byte offset helper function
497  * @offset: byte offset from MMIO start
498  * @value: the value to write to the register
499  *
500  */
501 /**
502  * amdgpu_mm_wreg8 - write a memory mapped IO register
503  *
504  * @adev: amdgpu_device pointer
505  * @offset: byte aligned register offset
506  * @value: 8 bit value to write
507  *
508  * Writes the value specified to the offset specified.
509  */
510 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
511 {
512         if (amdgpu_device_skip_hw_access(adev))
513                 return;
514
515         if (offset < adev->rmmio_size)
516                 writeb(value, adev->rmmio + offset);
517         else
518                 BUG();
519 }
520
521 /**
522  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
523  *
524  * @adev: amdgpu_device pointer
525  * @reg: dword aligned register offset
526  * @v: 32 bit value to write to the register
527  * @acc_flags: access flags which require special behavior
528  *
529  * Writes the value specified to the offset specified.
530  */
531 void amdgpu_device_wreg(struct amdgpu_device *adev,
532                         uint32_t reg, uint32_t v,
533                         uint32_t acc_flags)
534 {
535         if (amdgpu_device_skip_hw_access(adev))
536                 return;
537
538         if ((reg * 4) < adev->rmmio_size) {
539                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
540                     amdgpu_sriov_runtime(adev) &&
541                     down_read_trylock(&adev->reset_domain->sem)) {
542                         amdgpu_kiq_wreg(adev, reg, v);
543                         up_read(&adev->reset_domain->sem);
544                 } else {
545                         writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
546                 }
547         } else {
548                 adev->pcie_wreg(adev, reg * 4, v);
549         }
550
551         trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
552 }
553
554 /**
555  * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
556  *
557  * @adev: amdgpu_device pointer
558  * @reg: mmio/rlc register
559  * @v: value to write
560  *
561  * This function is invoked only for debugfs register access.
562  */
563 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
564                              uint32_t reg, uint32_t v)
565 {
566         if (amdgpu_device_skip_hw_access(adev))
567                 return;
568
569         if (amdgpu_sriov_fullaccess(adev) &&
570             adev->gfx.rlc.funcs &&
571             adev->gfx.rlc.funcs->is_rlcg_access_range) {
572                 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
573                         return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
574         } else if ((reg * 4) >= adev->rmmio_size) {
575                 adev->pcie_wreg(adev, reg * 4, v);
576         } else {
577                 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
578         }
579 }
580
581 /**
582  * amdgpu_mm_rdoorbell - read a doorbell dword
583  *
584  * @adev: amdgpu_device pointer
585  * @index: doorbell index
586  *
587  * Returns the value in the doorbell aperture at the
588  * requested doorbell index (CIK).
589  */
590 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
591 {
592         if (amdgpu_device_skip_hw_access(adev))
593                 return 0;
594
595         if (index < adev->doorbell.num_doorbells) {
596                 return readl(adev->doorbell.ptr + index);
597         } else {
598                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
599                 return 0;
600         }
601 }
602
603 /**
604  * amdgpu_mm_wdoorbell - write a doorbell dword
605  *
606  * @adev: amdgpu_device pointer
607  * @index: doorbell index
608  * @v: value to write
609  *
610  * Writes @v to the doorbell aperture at the
611  * requested doorbell index (CIK).
612  */
613 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
614 {
615         if (amdgpu_device_skip_hw_access(adev))
616                 return;
617
618         if (index < adev->doorbell.num_doorbells) {
619                 writel(v, adev->doorbell.ptr + index);
620         } else {
621                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
622         }
623 }
624
625 /**
626  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
627  *
628  * @adev: amdgpu_device pointer
629  * @index: doorbell index
630  *
631  * Returns the value in the doorbell aperture at the
632  * requested doorbell index (VEGA10+).
633  */
634 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
635 {
636         if (amdgpu_device_skip_hw_access(adev))
637                 return 0;
638
639         if (index < adev->doorbell.num_doorbells) {
640                 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
641         } else {
642                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
643                 return 0;
644         }
645 }
646
647 /**
648  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
649  *
650  * @adev: amdgpu_device pointer
651  * @index: doorbell index
652  * @v: value to write
653  *
654  * Writes @v to the doorbell aperture at the
655  * requested doorbell index (VEGA10+).
656  */
657 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
658 {
659         if (amdgpu_device_skip_hw_access(adev))
660                 return;
661
662         if (index < adev->doorbell.num_doorbells) {
663                 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
664         } else {
665                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
666         }
667 }
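
/*
 * Illustrative usage sketch (editor's addition): ring code typically moves
 * its write pointer through these doorbell helpers, e.g.
 *
 *	amdgpu_mm_wdoorbell64(adev, ring_doorbell_index, wptr);
 *	wptr = amdgpu_mm_rdoorbell64(adev, ring_doorbell_index);
 *
 * where ring_doorbell_index is a hypothetical placeholder for a doorbell
 * index assigned via amdgpu_asic_init_doorbell_index().
 */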
668
669 /**
670  * amdgpu_device_indirect_rreg - read an indirect register
671  *
672  * @adev: amdgpu_device pointer
673  * @pcie_index: mmio register offset
674  * @pcie_data: mmio register offset
675  * @reg_addr: indirect register address to read from
676  *
677  * Returns the value of indirect register @reg_addr
678  */
679 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
680                                 u32 pcie_index, u32 pcie_data,
681                                 u32 reg_addr)
682 {
683         unsigned long flags;
684         u32 r;
685         void __iomem *pcie_index_offset;
686         void __iomem *pcie_data_offset;
687
688         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
689         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
690         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
691
692         writel(reg_addr, pcie_index_offset);
693         readl(pcie_index_offset);
694         r = readl(pcie_data_offset);
695         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
696
697         return r;
698 }
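
/*
 * Editor's note (sketch, not from the original file): the indirect access
 * path is a classic index/data register pair. The caller supplies the MMIO
 * offsets of the ASIC-specific index and data registers, e.g. something like:
 *
 *	val = amdgpu_device_indirect_rreg(adev, nbio_pcie_index, nbio_pcie_data,
 *					  reg_addr);
 *
 * where nbio_pcie_index/nbio_pcie_data are hypothetical placeholders for the
 * offsets a given NBIO implementation would pass in.
 */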
699
700 /**
701  * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
702  *
703  * @adev: amdgpu_device pointer
704  * @pcie_index: mmio register offset
705  * @pcie_data: mmio register offset
706  * @reg_addr: indirect register address to read from
707  *
708  * Returns the value of indirect register @reg_addr
709  */
710 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
711                                   u32 pcie_index, u32 pcie_data,
712                                   u32 reg_addr)
713 {
714         unsigned long flags;
715         u64 r;
716         void __iomem *pcie_index_offset;
717         void __iomem *pcie_data_offset;
718
719         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
720         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
721         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
722
723         /* read low 32 bits */
724         writel(reg_addr, pcie_index_offset);
725         readl(pcie_index_offset);
726         r = readl(pcie_data_offset);
727         /* read high 32 bits */
728         writel(reg_addr + 4, pcie_index_offset);
729         readl(pcie_index_offset);
730         r |= ((u64)readl(pcie_data_offset) << 32);
731         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
732
733         return r;
734 }
735
736 /**
737  * amdgpu_device_indirect_wreg - write an indirect register
738  *
739  * @adev: amdgpu_device pointer
740  * @pcie_index: mmio register offset
741  * @pcie_data: mmio register offset
742  * @reg_addr: indirect register offset
743  * @reg_data: indirect register data
744  *
745  */
746 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
747                                  u32 pcie_index, u32 pcie_data,
748                                  u32 reg_addr, u32 reg_data)
749 {
750         unsigned long flags;
751         void __iomem *pcie_index_offset;
752         void __iomem *pcie_data_offset;
753
754         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
755         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
756         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
757
758         writel(reg_addr, pcie_index_offset);
759         readl(pcie_index_offset);
760         writel(reg_data, pcie_data_offset);
761         readl(pcie_data_offset);
762         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
763 }
764
765 /**
766  * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register
767  *
768  * @adev: amdgpu_device pointer
769  * @pcie_index: mmio register offset
770  * @pcie_data: mmio register offset
771  * @reg_addr: indirect register offset
772  * @reg_data: indirect register data
773  *
774  */
775 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
776                                    u32 pcie_index, u32 pcie_data,
777                                    u32 reg_addr, u64 reg_data)
778 {
779         unsigned long flags;
780         void __iomem *pcie_index_offset;
781         void __iomem *pcie_data_offset;
782
783         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
784         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
785         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
786
787         /* write low 32 bits */
788         writel(reg_addr, pcie_index_offset);
789         readl(pcie_index_offset);
790         writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
791         readl(pcie_data_offset);
792         /* write high 32 bits */
793         writel(reg_addr + 4, pcie_index_offset);
794         readl(pcie_index_offset);
795         writel((u32)(reg_data >> 32), pcie_data_offset);
796         readl(pcie_data_offset);
797         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
798 }
799
800 /**
801  * amdgpu_invalid_rreg - dummy reg read function
802  *
803  * @adev: amdgpu_device pointer
804  * @reg: offset of register
805  *
806  * Dummy register read function.  Used for register blocks
807  * that certain asics don't have (all asics).
808  * Returns the value in the register.
809  */
810 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
811 {
812         DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
813         BUG();
814         return 0;
815 }
816
817 /**
818  * amdgpu_invalid_wreg - dummy reg write function
819  *
820  * @adev: amdgpu_device pointer
821  * @reg: offset of register
822  * @v: value to write to the register
823  *
824  * Dummy register write function.  Used for register blocks
825  * that certain asics don't have (all asics).
826  */
827 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
828 {
829         DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
830                   reg, v);
831         BUG();
832 }
833
834 /**
835  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
836  *
837  * @adev: amdgpu_device pointer
838  * @reg: offset of register
839  *
840  * Dummy register read function.  Used for register blocks
841  * that certain asics don't have (all asics).
842  * Returns the value in the register.
843  */
844 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
845 {
846         DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
847         BUG();
848         return 0;
849 }
850
851 /**
852  * amdgpu_invalid_wreg64 - dummy reg write function
853  *
854  * @adev: amdgpu_device pointer
855  * @reg: offset of register
856  * @v: value to write to the register
857  *
858  * Dummy register write function.  Used for register blocks
859  * that certain asics don't have (all asics).
860  */
861 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
862 {
863         DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
864                   reg, v);
865         BUG();
866 }
867
868 /**
869  * amdgpu_block_invalid_rreg - dummy reg read function
870  *
871  * @adev: amdgpu_device pointer
872  * @block: offset of instance
873  * @reg: offset of register
874  *
875  * Dummy register read function.  Used for register blocks
876  * that certain asics don't have (all asics).
877  * Returns the value in the register.
878  */
879 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
880                                           uint32_t block, uint32_t reg)
881 {
882         DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
883                   reg, block);
884         BUG();
885         return 0;
886 }
887
888 /**
889  * amdgpu_block_invalid_wreg - dummy reg write function
890  *
891  * @adev: amdgpu_device pointer
892  * @block: offset of instance
893  * @reg: offset of register
894  * @v: value to write to the register
895  *
896  * Dummy register write function.  Used for register blocks
897  * that certain asics don't have (all asics).
898  */
899 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
900                                       uint32_t block,
901                                       uint32_t reg, uint32_t v)
902 {
903         DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
904                   reg, block, v);
905         BUG();
906 }
907
908 /**
909  * amdgpu_device_asic_init - Wrapper for atom asic_init
910  *
911  * @adev: amdgpu_device pointer
912  *
913  * Does any asic specific work and then calls atom asic init.
914  */
915 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
916 {
917         amdgpu_asic_pre_asic_init(adev);
918
919         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
920                 return amdgpu_atomfirmware_asic_init(adev, true);
921         else
922                 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
923 }
924
925 /**
926  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
927  *
928  * @adev: amdgpu_device pointer
929  *
930  * Allocates a scratch page of VRAM for use by various things in the
931  * driver.
932  */
933 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
934 {
935         return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
936                                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
937                                        &adev->vram_scratch.robj,
938                                        &adev->vram_scratch.gpu_addr,
939                                        (void **)&adev->vram_scratch.ptr);
940 }
941
942 /**
943  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
944  *
945  * @adev: amdgpu_device pointer
946  *
947  * Frees the VRAM scratch page.
948  */
949 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
950 {
951         amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
952 }
953
954 /**
955  * amdgpu_device_program_register_sequence - program an array of registers.
956  *
957  * @adev: amdgpu_device pointer
958  * @registers: pointer to the register array
959  * @array_size: size of the register array
960  *
961  * Programs an array of registers with AND and OR masks.
962  * This is a helper for setting golden registers.
963  */
964 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
965                                              const u32 *registers,
966                                              const u32 array_size)
967 {
968         u32 tmp, reg, and_mask, or_mask;
969         int i;
970
971         if (array_size % 3)
972                 return;
973
974         for (i = 0; i < array_size; i += 3) {
975                 reg = registers[i + 0];
976                 and_mask = registers[i + 1];
977                 or_mask = registers[i + 2];
978
979                 if (and_mask == 0xffffffff) {
980                         tmp = or_mask;
981                 } else {
982                         tmp = RREG32(reg);
983                         tmp &= ~and_mask;
984                         if (adev->family >= AMDGPU_FAMILY_AI)
985                                 tmp |= (or_mask & and_mask);
986                         else
987                                 tmp |= or_mask;
988                 }
989                 WREG32(reg, tmp);
990         }
991 }
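
/*
 * Illustrative sketch (editor's addition): a golden register list is a flat
 * array of {offset, and_mask, or_mask} triples. An and_mask of 0xffffffff
 * means "write or_mask directly"; anything else is a read-modify-write of the
 * masked field. The register offsets below are made-up placeholders.
 *
 *	static const u32 golden_example[] = {
 *		0x1234, 0xffffffff, 0x00000001,
 *		0x5678, 0x0000000f, 0x00000002,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, golden_example,
 *						ARRAY_SIZE(golden_example));
 */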
992
993 /**
994  * amdgpu_device_pci_config_reset - reset the GPU
995  *
996  * @adev: amdgpu_device pointer
997  *
998  * Resets the GPU using the pci config reset sequence.
999  * Only applicable to asics prior to vega10.
1000  */
1001 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1002 {
1003         pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1004 }
1005
1006 /**
1007  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1008  *
1009  * @adev: amdgpu_device pointer
1010  *
1011  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1012  */
1013 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1014 {
1015         return pci_reset_function(adev->pdev);
1016 }
1017
1018 /*
1019  * GPU doorbell aperture helper functions.
1020  */
1021 /**
1022  * amdgpu_device_doorbell_init - Init doorbell driver information.
1023  *
1024  * @adev: amdgpu_device pointer
1025  *
1026  * Init doorbell driver information (CIK)
1027  * Returns 0 on success, error on failure.
1028  */
1029 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1030 {
1031
1032         /* No doorbell on SI hardware generation */
1033         if (adev->asic_type < CHIP_BONAIRE) {
1034                 adev->doorbell.base = 0;
1035                 adev->doorbell.size = 0;
1036                 adev->doorbell.num_doorbells = 0;
1037                 adev->doorbell.ptr = NULL;
1038                 return 0;
1039         }
1040
1041         if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1042                 return -EINVAL;
1043
1044         amdgpu_asic_init_doorbell_index(adev);
1045
1046         /* doorbell bar mapping */
1047         adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1048         adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1049
1050         if (adev->enable_mes) {
1051                 adev->doorbell.num_doorbells =
1052                         adev->doorbell.size / sizeof(u32);
1053         } else {
1054                 adev->doorbell.num_doorbells =
1055                         min_t(u32, adev->doorbell.size / sizeof(u32),
1056                               adev->doorbell_index.max_assignment+1);
1057                 if (adev->doorbell.num_doorbells == 0)
1058                         return -EINVAL;
1059
1060                 /* For Vega, reserve and map two pages on the doorbell BAR since the
1061                  * SDMA paging queue doorbell uses the second page. The
1062                  * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1063                  * doorbells are in the first page. So with the paging queue enabled,
1064                  * the max num_doorbells should be extended by one page (0x400 dwords).
1065                  */
1066                 if (adev->asic_type >= CHIP_VEGA10)
1067                         adev->doorbell.num_doorbells += 0x400;
1068         }
1069
1070         adev->doorbell.ptr = ioremap(adev->doorbell.base,
1071                                      adev->doorbell.num_doorbells *
1072                                      sizeof(u32));
1073         if (adev->doorbell.ptr == NULL)
1074                 return -ENOMEM;
1075
1076         return 0;
1077 }
1078
1079 /**
1080  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1081  *
1082  * @adev: amdgpu_device pointer
1083  *
1084  * Tear down doorbell driver information (CIK)
1085  */
1086 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1087 {
1088         iounmap(adev->doorbell.ptr);
1089         adev->doorbell.ptr = NULL;
1090 }
1091
1092
1093
1094 /*
1095  * amdgpu_device_wb_*()
1096  * Writeback is the method by which the GPU updates special pages in memory
1097  * with the status of certain GPU events (fences, ring pointers, etc.).
1098  */
1099
1100 /**
1101  * amdgpu_device_wb_fini - Disable Writeback and free memory
1102  *
1103  * @adev: amdgpu_device pointer
1104  *
1105  * Disables Writeback and frees the Writeback memory (all asics).
1106  * Used at driver shutdown.
1107  */
1108 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1109 {
1110         if (adev->wb.wb_obj) {
1111                 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1112                                       &adev->wb.gpu_addr,
1113                                       (void **)&adev->wb.wb);
1114                 adev->wb.wb_obj = NULL;
1115         }
1116 }
1117
1118 /**
1119  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1120  *
1121  * @adev: amdgpu_device pointer
1122  *
1123  * Initializes writeback and allocates writeback memory (all asics).
1124  * Used at driver startup.
1125  * Returns 0 on success or a negative error code on failure.
1126  */
1127 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1128 {
1129         int r;
1130
1131         if (adev->wb.wb_obj == NULL) {
1132                 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1133                 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1134                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1135                                             &adev->wb.wb_obj, &adev->wb.gpu_addr,
1136                                             (void **)&adev->wb.wb);
1137                 if (r) {
1138                         dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1139                         return r;
1140                 }
1141
1142                 adev->wb.num_wb = AMDGPU_MAX_WB;
1143                 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1144
1145                 /* clear wb memory */
1146                 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1147         }
1148
1149         return 0;
1150 }
1151
1152 /**
1153  * amdgpu_device_wb_get - Allocate a wb entry
1154  *
1155  * @adev: amdgpu_device pointer
1156  * @wb: wb index
1157  *
1158  * Allocate a wb slot for use by the driver (all asics).
1159  * Returns 0 on success or -EINVAL on failure.
1160  */
1161 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1162 {
1163         unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1164
1165         if (offset < adev->wb.num_wb) {
1166                 __set_bit(offset, adev->wb.used);
1167                 *wb = offset << 3; /* convert to dw offset */
1168                 return 0;
1169         } else {
1170                 return -EINVAL;
1171         }
1172 }
1173
1174 /**
1175  * amdgpu_device_wb_free - Free a wb entry
1176  *
1177  * @adev: amdgpu_device pointer
1178  * @wb: wb index
1179  *
1180  * Free a wb slot allocated for use by the driver (all asics)
1181  */
1182 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1183 {
1184         wb >>= 3;
1185         if (wb < adev->wb.num_wb)
1186                 __clear_bit(wb, adev->wb.used);
1187 }
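
/*
 * Illustrative usage sketch (editor's addition): a ring or IP block reserves
 * a writeback slot, lets the GPU write status into it, and releases it again.
 * The returned value is a dword offset into adev->wb.wb / adev->wb.gpu_addr.
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + wb * 4;
 *		// ... hand gpu_addr to the engine, read back adev->wb.wb[wb] ...
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */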
1188
1189 /**
1190  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1191  *
1192  * @adev: amdgpu_device pointer
1193  *
1194  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1195  * to fail, but if any of the BARs is not accessible after the resize we abort
1196  * driver loading by returning -ENODEV.
1197  */
1198 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1199 {
1200         int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1201         struct pci_bus *root;
1202         struct resource *res;
1203         unsigned i;
1204         u16 cmd;
1205         int r;
1206
1207         /* Bypass for VF */
1208         if (amdgpu_sriov_vf(adev))
1209                 return 0;
1210
1211         /* skip if the bios has already enabled large BAR */
1212         if (adev->gmc.real_vram_size &&
1213             (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1214                 return 0;
1215
1216         /* Check if the root BUS has 64bit memory resources */
1217         root = adev->pdev->bus;
1218         while (root->parent)
1219                 root = root->parent;
1220
1221         pci_bus_for_each_resource(root, res, i) {
1222                 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1223                     res->start > 0x100000000ull)
1224                         break;
1225         }
1226
1227         /* Trying to resize is pointless without a root hub window above 4GB */
1228         if (!res)
1229                 return 0;
1230
1231         /* Limit the BAR size to what is available */
1232         rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1233                         rbar_size);
1234
1235         /* Disable memory decoding while we change the BAR addresses and size */
1236         pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1237         pci_write_config_word(adev->pdev, PCI_COMMAND,
1238                               cmd & ~PCI_COMMAND_MEMORY);
1239
1240         /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1241         amdgpu_device_doorbell_fini(adev);
1242         if (adev->asic_type >= CHIP_BONAIRE)
1243                 pci_release_resource(adev->pdev, 2);
1244
1245         pci_release_resource(adev->pdev, 0);
1246
1247         r = pci_resize_resource(adev->pdev, 0, rbar_size);
1248         if (r == -ENOSPC)
1249                 DRM_INFO("Not enough PCI address space for a large BAR.");
1250         else if (r && r != -ENOTSUPP)
1251                 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1252
1253         pci_assign_unassigned_bus_resources(adev->pdev->bus);
1254
1255         /* When the doorbell or fb BAR isn't available we have no chance of
1256          * using the device.
1257          */
1258         r = amdgpu_device_doorbell_init(adev);
1259         if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1260                 return -ENODEV;
1261
1262         pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1263
1264         return 0;
1265 }
1266
1267 /*
1268  * GPU helper functions.
1269  */
1270 /**
1271  * amdgpu_device_need_post - check if the hw need post or not
1272  *
1273  * @adev: amdgpu_device pointer
1274  *
1275  * Check if the asic has been initialized (all asics) at driver startup
1276  * or whether a post is needed if a hw reset is performed.
1277  * Returns true if a post is needed or false if not.
1278  */
1279 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1280 {
1281         uint32_t reg;
1282
1283         if (amdgpu_sriov_vf(adev))
1284                 return false;
1285
1286         if (amdgpu_passthrough(adev)) {
1287                 /* for FIJI: In the whole GPU pass-through virtualization case, after a VM
1288                  * reboot some old SMC firmware still needs the driver to do a vPost,
1289                  * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
1290                  * this flaw, so we force a vPost for SMC versions below 22.15.
1291                  */
1292                 if (adev->asic_type == CHIP_FIJI) {
1293                         int err;
1294                         uint32_t fw_ver;
1295                         err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1296                         /* force vPost if an error occurred */
1297                         if (err)
1298                                 return true;
1299
1300                         fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1301                         if (fw_ver < 0x00160e00)
1302                                 return true;
1303                 }
1304         }
1305
1306         /* Don't post if we need to reset whole hive on init */
1307         if (adev->gmc.xgmi.pending_reset)
1308                 return false;
1309
1310         if (adev->has_hw_reset) {
1311                 adev->has_hw_reset = false;
1312                 return true;
1313         }
1314
1315         /* bios scratch used on CIK+ */
1316         if (adev->asic_type >= CHIP_BONAIRE)
1317                 return amdgpu_atombios_scratch_need_asic_init(adev);
1318
1319         /* check MEM_SIZE for older asics */
1320         reg = amdgpu_asic_get_config_memsize(adev);
1321
1322         if ((reg != 0) && (reg != 0xffffffff))
1323                 return false;
1324
1325         return true;
1326 }
1327
1328 /**
1329  * amdgpu_device_should_use_aspm - check if the device should program ASPM
1330  *
1331  * @adev: amdgpu_device pointer
1332  *
1333  * Confirm whether the module parameter and pcie bridge agree that ASPM should
1334  * be set for this device.
1335  *
1336  * Returns true if it should be used or false if not.
1337  */
1338 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1339 {
1340         switch (amdgpu_aspm) {
1341         case -1:
1342                 break;
1343         case 0:
1344                 return false;
1345         case 1:
1346                 return true;
1347         default:
1348                 return false;
1349         }
1350         return pcie_aspm_enabled(adev->pdev);
1351 }
1352
1353 /* if we get transitioned to only one device, take VGA back */
1354 /**
1355  * amdgpu_device_vga_set_decode - enable/disable vga decode
1356  *
1357  * @pdev: PCI device pointer
1358  * @state: enable/disable vga decode
1359  *
1360  * Enable/disable vga decode (all asics).
1361  * Returns VGA resource flags.
1362  */
1363 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1364                 bool state)
1365 {
1366         struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1367         amdgpu_asic_set_vga_state(adev, state);
1368         if (state)
1369                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1370                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1371         else
1372                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1373 }
1374
1375 /**
1376  * amdgpu_device_check_block_size - validate the vm block size
1377  *
1378  * @adev: amdgpu_device pointer
1379  *
1380  * Validates the vm block size specified via module parameter.
1381  * The vm block size defines number of bits in page table versus page directory,
1382  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1383  * page table and the remaining bits are in the page directory.
1384  */
1385 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1386 {
1387         /* defines the number of bits in the page table versus the page directory;
1388          * a page is 4KB so we have a 12 bit offset, a minimum of 9 bits in the
1389          * page table and the remaining bits are in the page directory */
1390         if (amdgpu_vm_block_size == -1)
1391                 return;
1392
1393         if (amdgpu_vm_block_size < 9) {
1394                 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1395                          amdgpu_vm_block_size);
1396                 amdgpu_vm_block_size = -1;
1397         }
1398 }
1399
1400 /**
1401  * amdgpu_device_check_vm_size - validate the vm size
1402  *
1403  * @adev: amdgpu_device pointer
1404  *
1405  * Validates the vm size in GB specified via module parameter.
1406  * The VM size is the size of the GPU virtual memory space in GB.
1407  */
1408 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1409 {
1410         /* no need to check the default value */
1411         if (amdgpu_vm_size == -1)
1412                 return;
1413
1414         if (amdgpu_vm_size < 1) {
1415                 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1416                          amdgpu_vm_size);
1417                 amdgpu_vm_size = -1;
1418         }
1419 }
1420
1421 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1422 {
1423         struct sysinfo si;
1424         bool is_os_64 = (sizeof(void *) == 8);
1425         uint64_t total_memory;
1426         uint64_t dram_size_seven_GB = 0x1B8000000;
1427         uint64_t dram_size_three_GB = 0xB8000000;
1428
1429         if (amdgpu_smu_memory_pool_size == 0)
1430                 return;
1431
1432         if (!is_os_64) {
1433                 DRM_WARN("Not 64-bit OS, feature not supported\n");
1434                 goto def_value;
1435         }
1436         si_meminfo(&si);
1437         total_memory = (uint64_t)si.totalram * si.mem_unit;
1438
1439         if ((amdgpu_smu_memory_pool_size == 1) ||
1440                 (amdgpu_smu_memory_pool_size == 2)) {
1441                 if (total_memory < dram_size_three_GB)
1442                         goto def_value1;
1443         } else if ((amdgpu_smu_memory_pool_size == 4) ||
1444                 (amdgpu_smu_memory_pool_size == 8)) {
1445                 if (total_memory < dram_size_seven_GB)
1446                         goto def_value1;
1447         } else {
1448                 DRM_WARN("Smu memory pool size not supported\n");
1449                 goto def_value;
1450         }
1451         adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1452
1453         return;
1454
1455 def_value1:
1456         DRM_WARN("Not enough system memory\n");
1457 def_value:
1458         adev->pm.smu_prv_buffer_size = 0;
1459 }
1460
1461 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1462 {
1463         if (!(adev->flags & AMD_IS_APU) ||
1464             adev->asic_type < CHIP_RAVEN)
1465                 return 0;
1466
1467         switch (adev->asic_type) {
1468         case CHIP_RAVEN:
1469                 if (adev->pdev->device == 0x15dd)
1470                         adev->apu_flags |= AMD_APU_IS_RAVEN;
1471                 if (adev->pdev->device == 0x15d8)
1472                         adev->apu_flags |= AMD_APU_IS_PICASSO;
1473                 break;
1474         case CHIP_RENOIR:
1475                 if ((adev->pdev->device == 0x1636) ||
1476                     (adev->pdev->device == 0x164c))
1477                         adev->apu_flags |= AMD_APU_IS_RENOIR;
1478                 else
1479                         adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1480                 break;
1481         case CHIP_VANGOGH:
1482                 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1483                 break;
1484         case CHIP_YELLOW_CARP:
1485                 break;
1486         case CHIP_CYAN_SKILLFISH:
1487                 if ((adev->pdev->device == 0x13FE) ||
1488                     (adev->pdev->device == 0x143F))
1489                         adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1490                 break;
1491         default:
1492                 break;
1493         }
1494
1495         return 0;
1496 }
1497
1498 /**
1499  * amdgpu_device_check_arguments - validate module params
1500  *
1501  * @adev: amdgpu_device pointer
1502  *
1503  * Validates certain module parameters and updates
1504  * the associated values used by the driver (all asics).
1505  */
1506 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1507 {
1508         if (amdgpu_sched_jobs < 4) {
1509                 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1510                          amdgpu_sched_jobs);
1511                 amdgpu_sched_jobs = 4;
1512         } else if (!is_power_of_2(amdgpu_sched_jobs)){
1513                 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1514                          amdgpu_sched_jobs);
1515                 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1516         }
1517
1518         if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1519                 /* gart size must be greater than or equal to 32M */
1520                 dev_warn(adev->dev, "gart size (%d) too small\n",
1521                          amdgpu_gart_size);
1522                 amdgpu_gart_size = -1;
1523         }
1524
1525         if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1526                 /* gtt size must be greater than or equal to 32M */
1527                 dev_warn(adev->dev, "gtt size (%d) too small\n",
1528                                  amdgpu_gtt_size);
1529                 amdgpu_gtt_size = -1;
1530         }
1531
1532         /* valid range is between 4 and 9 inclusive */
1533         if (amdgpu_vm_fragment_size != -1 &&
1534             (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1535                 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1536                 amdgpu_vm_fragment_size = -1;
1537         }
1538
1539         if (amdgpu_sched_hw_submission < 2) {
1540                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1541                          amdgpu_sched_hw_submission);
1542                 amdgpu_sched_hw_submission = 2;
1543         } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1544                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1545                          amdgpu_sched_hw_submission);
1546                 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1547         }
1548
1549         if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1550                 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1551                 amdgpu_reset_method = -1;
1552         }
1553
1554         amdgpu_device_check_smu_prv_buffer_size(adev);
1555
1556         amdgpu_device_check_vm_size(adev);
1557
1558         amdgpu_device_check_block_size(adev);
1559
1560         adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1561
1562         return 0;
1563 }
1564
1565 /**
1566  * amdgpu_switcheroo_set_state - set switcheroo state
1567  *
1568  * @pdev: pci dev pointer
1569  * @state: vga_switcheroo state
1570  *
1571  * Callback for the switcheroo driver.  Suspends or resumes
1572  * the asic before or after it is powered up using ACPI methods.
1573  */
1574 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1575                                         enum vga_switcheroo_state state)
1576 {
1577         struct drm_device *dev = pci_get_drvdata(pdev);
1578         int r;
1579
1580         if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1581                 return;
1582
1583         if (state == VGA_SWITCHEROO_ON) {
1584                 pr_info("switched on\n");
1585                 /* don't suspend or resume card normally */
1586                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1587
1588                 pci_set_power_state(pdev, PCI_D0);
1589                 amdgpu_device_load_pci_state(pdev);
1590                 r = pci_enable_device(pdev);
1591                 if (r)
1592                         DRM_WARN("pci_enable_device failed (%d)\n", r);
1593                 amdgpu_device_resume(dev, true);
1594
1595                 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1596         } else {
1597                 pr_info("switched off\n");
1598                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1599                 amdgpu_device_suspend(dev, true);
1600                 amdgpu_device_cache_pci_state(pdev);
1601                 /* Shut down the device */
1602                 pci_disable_device(pdev);
1603                 pci_set_power_state(pdev, PCI_D3cold);
1604                 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1605         }
1606 }
1607
1608 /**
1609  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1610  *
1611  * @pdev: pci dev pointer
1612  *
1613  * Callback for the switcheroo driver.  Check if the switcheroo
1614  * state can be changed.
1615  * Returns true if the state can be changed, false if not.
1616  */
1617 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1618 {
1619         struct drm_device *dev = pci_get_drvdata(pdev);
1620
1621         /*
1622         * FIXME: open_count is protected by drm_global_mutex but that would lead to
1623         * locking inversion with the driver load path. And the access here is
1624         * completely racy anyway. So don't bother with locking for now.
1625         */
1626         return atomic_read(&dev->open_count) == 0;
1627 }
1628
1629 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1630         .set_gpu_state = amdgpu_switcheroo_set_state,
1631         .reprobe = NULL,
1632         .can_switch = amdgpu_switcheroo_can_switch,
1633 };
1634
1635 /**
1636  * amdgpu_device_ip_set_clockgating_state - set the CG state
1637  *
1638  * @dev: amdgpu_device pointer
1639  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1640  * @state: clockgating state (gate or ungate)
1641  *
1642  * Sets the requested clockgating state for all instances of
1643  * the hardware IP specified.
1644  * Returns the error code from the last instance.
1645  */
1646 int amdgpu_device_ip_set_clockgating_state(void *dev,
1647                                            enum amd_ip_block_type block_type,
1648                                            enum amd_clockgating_state state)
1649 {
1650         struct amdgpu_device *adev = dev;
1651         int i, r = 0;
1652
1653         for (i = 0; i < adev->num_ip_blocks; i++) {
1654                 if (!adev->ip_blocks[i].status.valid)
1655                         continue;
1656                 if (adev->ip_blocks[i].version->type != block_type)
1657                         continue;
1658                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1659                         continue;
1660                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1661                         (void *)adev, state);
1662                 if (r)
1663                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1664                                   adev->ip_blocks[i].version->funcs->name, r);
1665         }
1666         return r;
1667 }
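
/*
 * Example usage (illustrative sketch, not a call made from this file): a
 * caller holding a valid amdgpu_device pointer can gate the clocks of every
 * UVD instance as follows; the enums are the same ones used with this
 * function elsewhere in the driver.
 *
 *	r = amdgpu_device_ip_set_clockgating_state(adev,
 *						    AMD_IP_BLOCK_TYPE_UVD,
 *						    AMD_CG_STATE_GATE);
 *	if (r)
 *		DRM_ERROR("failed to gate UVD clocks (%d)\n", r);
 */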
1668
1669 /**
1670  * amdgpu_device_ip_set_powergating_state - set the PG state
1671  *
1672  * @dev: amdgpu_device pointer
1673  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1674  * @state: powergating state (gate or ungate)
1675  *
1676  * Sets the requested powergating state for all instances of
1677  * the hardware IP specified.
1678  * Returns the error code from the last instance.
1679  */
1680 int amdgpu_device_ip_set_powergating_state(void *dev,
1681                                            enum amd_ip_block_type block_type,
1682                                            enum amd_powergating_state state)
1683 {
1684         struct amdgpu_device *adev = dev;
1685         int i, r = 0;
1686
1687         for (i = 0; i < adev->num_ip_blocks; i++) {
1688                 if (!adev->ip_blocks[i].status.valid)
1689                         continue;
1690                 if (adev->ip_blocks[i].version->type != block_type)
1691                         continue;
1692                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1693                         continue;
1694                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1695                         (void *)adev, state);
1696                 if (r)
1697                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1698                                   adev->ip_blocks[i].version->funcs->name, r);
1699         }
1700         return r;
1701 }
1702
1703 /**
1704  * amdgpu_device_ip_get_clockgating_state - get the CG state
1705  *
1706  * @adev: amdgpu_device pointer
1707  * @flags: clockgating feature flags
1708  *
1709  * Walks the list of IPs on the device and updates @flags with the
1710  * clockgating feature flags for each hardware IP where clockgating
1711  * is enabled.
1713  */
1714 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1715                                             u64 *flags)
1716 {
1717         int i;
1718
1719         for (i = 0; i < adev->num_ip_blocks; i++) {
1720                 if (!adev->ip_blocks[i].status.valid)
1721                         continue;
1722                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1723                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1724         }
1725 }
1726
1727 /**
1728  * amdgpu_device_ip_wait_for_idle - wait for idle
1729  *
1730  * @adev: amdgpu_device pointer
1731  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1732  *
1733  * Waits for the requested hardware IP to be idle.
1734  * Returns 0 for success or a negative error code on failure.
1735  */
1736 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1737                                    enum amd_ip_block_type block_type)
1738 {
1739         int i, r;
1740
1741         for (i = 0; i < adev->num_ip_blocks; i++) {
1742                 if (!adev->ip_blocks[i].status.valid)
1743                         continue;
1744                 if (adev->ip_blocks[i].version->type == block_type) {
1745                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1746                         if (r)
1747                                 return r;
1748                         break;
1749                 }
1750         }
1751         return 0;
1752
1753 }
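
/*
 * Example usage (illustrative sketch): wait for the GMC block to report idle
 * before touching memory-controller state. A zero return means the block is
 * idle; a negative value is the error from its wait_for_idle hook.
 *
 *	r = amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
 *	if (r)
 *		dev_err(adev->dev, "GMC did not reach idle (%d)\n", r);
 */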
1754
1755 /**
1756  * amdgpu_device_ip_is_idle - is the hardware IP idle
1757  *
1758  * @adev: amdgpu_device pointer
1759  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1760  *
1761  * Check if the hardware IP is idle or not.
1762  * Returns true if the IP is idle, false if not.
1763  */
1764 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1765                               enum amd_ip_block_type block_type)
1766 {
1767         int i;
1768
1769         for (i = 0; i < adev->num_ip_blocks; i++) {
1770                 if (!adev->ip_blocks[i].status.valid)
1771                         continue;
1772                 if (adev->ip_blocks[i].version->type == block_type)
1773                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1774         }
1775         return true;
1776
1777 }
1778
1779 /**
1780  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1781  *
1782  * @adev: amdgpu_device pointer
1783  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1784  *
1785  * Returns a pointer to the hardware IP block structure
1786  * if it exists for the asic, otherwise NULL.
1787  */
1788 struct amdgpu_ip_block *
1789 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1790                               enum amd_ip_block_type type)
1791 {
1792         int i;
1793
1794         for (i = 0; i < adev->num_ip_blocks; i++)
1795                 if (adev->ip_blocks[i].version->type == type)
1796                         return &adev->ip_blocks[i];
1797
1798         return NULL;
1799 }
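
/*
 * Example usage (illustrative sketch): look up the PSP block and print its
 * version. The returned pointer may be NULL if the asic has no such IP.
 *
 *	struct amdgpu_ip_block *ip =
 *		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
 *
 *	if (ip)
 *		DRM_INFO("PSP IP v%u.%u\n", ip->version->major,
 *			 ip->version->minor);
 */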
1800
1801 /**
1802  * amdgpu_device_ip_block_version_cmp - check an IP block's version against a minimum
1803  *
1804  * @adev: amdgpu_device pointer
1805  * @type: enum amd_ip_block_type
1806  * @major: major version
1807  * @minor: minor version
1808  *
1809  * Returns 0 if the IP block's version is equal to or greater than
1810  * @major.@minor, 1 if it is smaller or the ip_block doesn't exist.
1811  */
1812 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1813                                        enum amd_ip_block_type type,
1814                                        u32 major, u32 minor)
1815 {
1816         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1817
1818         if (ip_block && ((ip_block->version->major > major) ||
1819                         ((ip_block->version->major == major) &&
1820                         (ip_block->version->minor >= minor))))
1821                 return 0;
1822
1823         return 1;
1824 }
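
/*
 * Example usage (illustrative sketch): require at least GFX 8.1 before
 * enabling a feature. Note the inverted sense of the return value: 0 means
 * the installed version satisfies the requested minimum.
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       8, 1) == 0) {
 *		... enable the GFX 8.1+ only feature here ...
 *	}
 */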
1825
1826 /**
1827  * amdgpu_device_ip_block_add - register an IP block with the device
1828  *
1829  * @adev: amdgpu_device pointer
1830  * @ip_block_version: pointer to the IP to add
1831  *
1832  * Adds the IP block driver information to the collection of IPs
1833  * on the asic.
1834  */
1835 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1836                                const struct amdgpu_ip_block_version *ip_block_version)
1837 {
1838         if (!ip_block_version)
1839                 return -EINVAL;
1840
1841         switch (ip_block_version->type) {
1842         case AMD_IP_BLOCK_TYPE_VCN:
1843                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1844                         return 0;
1845                 break;
1846         case AMD_IP_BLOCK_TYPE_JPEG:
1847                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1848                         return 0;
1849                 break;
1850         default:
1851                 break;
1852         }
1853
1854         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1855                   ip_block_version->funcs->name);
1856
1857         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1858
1859         return 0;
1860 }
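
/*
 * Example usage (illustrative sketch): an asic setup routine registers its IP
 * blocks in initialization order. The ip_block_version structure named below
 * is a hypothetical one exported by an asic-specific file.
 *
 *	r = amdgpu_device_ip_block_add(adev, &example_common_ip_block);
 *	if (r)
 *		return r;
 */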
1861
1862 /**
1863  * amdgpu_device_enable_virtual_display - enable virtual display feature
1864  *
1865  * @adev: amdgpu_device pointer
1866  *
1867  * Enables the virtual display feature if the user has enabled it via
1868  * the module parameter virtual_display.  This feature provides a virtual
1869  * display hardware on headless boards or in virtualized environments.
1870  * This function parses and validates the configuration string specified by
1871  * the user and configures the virtual display configuration (number of
1872  * virtual connectors, crtcs, etc.) specified.
1873  */
1874 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1875 {
1876         adev->enable_virtual_display = false;
1877
1878         if (amdgpu_virtual_display) {
1879                 const char *pci_address_name = pci_name(adev->pdev);
1880                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1881
1882                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1883                 pciaddstr_tmp = pciaddstr;
1884                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1885                         pciaddname = strsep(&pciaddname_tmp, ",");
1886                         if (!strcmp("all", pciaddname)
1887                             || !strcmp(pci_address_name, pciaddname)) {
1888                                 long num_crtc;
1889                                 int res = -1;
1890
1891                                 adev->enable_virtual_display = true;
1892
1893                                 if (pciaddname_tmp)
1894                                         res = kstrtol(pciaddname_tmp, 10,
1895                                                       &num_crtc);
1896
1897                                 if (!res) {
1898                                         if (num_crtc < 1)
1899                                                 num_crtc = 1;
1900                                         if (num_crtc > 6)
1901                                                 num_crtc = 6;
1902                                         adev->mode_info.num_crtc = num_crtc;
1903                                 } else {
1904                                         adev->mode_info.num_crtc = 1;
1905                                 }
1906                                 break;
1907                         }
1908                 }
1909
1910                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1911                          amdgpu_virtual_display, pci_address_name,
1912                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1913
1914                 kfree(pciaddstr);
1915         }
1916 }
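
/*
 * The virtual_display string parsed above is a semicolon-separated list of
 * entries, each a PCI address (or "all") optionally followed by a comma and a
 * crtc count that is clamped to the range 1..6. Illustrative module parameter
 * values (not code):
 *
 *	amdgpu.virtual_display=0000:01:00.0,2
 *	amdgpu.virtual_display=all,1
 *	amdgpu.virtual_display=0000:01:00.0;0000:02:00.0,4
 */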
1917
1918 /**
1919  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1920  *
1921  * @adev: amdgpu_device pointer
1922  *
1923  * Parses the asic configuration parameters specified in the gpu info
1924  * firmware and makes them available to the driver for use in configuring
1925  * the asic.
1926  * Returns 0 on success, -EINVAL on failure.
1927  */
1928 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1929 {
1930         const char *chip_name;
1931         char fw_name[40];
1932         int err;
1933         const struct gpu_info_firmware_header_v1_0 *hdr;
1934
1935         adev->firmware.gpu_info_fw = NULL;
1936
1937         if (adev->mman.discovery_bin) {
1938                 /*
1939                  * FIXME: The bounding box is still needed by Navi12, so
1940                  * temporarily read it from gpu_info firmware. Should be dropped
1941                  * when DAL no longer needs it.
1942                  */
1943                 if (adev->asic_type != CHIP_NAVI12)
1944                         return 0;
1945         }
1946
1947         switch (adev->asic_type) {
1948         default:
1949                 return 0;
1950         case CHIP_VEGA10:
1951                 chip_name = "vega10";
1952                 break;
1953         case CHIP_VEGA12:
1954                 chip_name = "vega12";
1955                 break;
1956         case CHIP_RAVEN:
1957                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1958                         chip_name = "raven2";
1959                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1960                         chip_name = "picasso";
1961                 else
1962                         chip_name = "raven";
1963                 break;
1964         case CHIP_ARCTURUS:
1965                 chip_name = "arcturus";
1966                 break;
1967         case CHIP_NAVI12:
1968                 chip_name = "navi12";
1969                 break;
1970         }
1971
1972         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1973         err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1974         if (err) {
1975                 dev_err(adev->dev,
1976                         "Failed to load gpu_info firmware \"%s\"\n",
1977                         fw_name);
1978                 goto out;
1979         }
1980         err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1981         if (err) {
1982                 dev_err(adev->dev,
1983                         "Failed to validate gpu_info firmware \"%s\"\n",
1984                         fw_name);
1985                 goto out;
1986         }
1987
1988         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1989         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1990
1991         switch (hdr->version_major) {
1992         case 1:
1993         {
1994                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1995                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1996                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1997
1998                 /*
1999                  * Should be dropped when DAL no longer needs it.
2000                  */
2001                 if (adev->asic_type == CHIP_NAVI12)
2002                         goto parse_soc_bounding_box;
2003
2004                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2005                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2006                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2007                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2008                 adev->gfx.config.max_texture_channel_caches =
2009                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
2010                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2011                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2012                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2013                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2014                 adev->gfx.config.double_offchip_lds_buf =
2015                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2016                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2017                 adev->gfx.cu_info.max_waves_per_simd =
2018                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2019                 adev->gfx.cu_info.max_scratch_slots_per_cu =
2020                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2021                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2022                 if (hdr->version_minor >= 1) {
2023                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2024                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2025                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2026                         adev->gfx.config.num_sc_per_sh =
2027                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2028                         adev->gfx.config.num_packer_per_sc =
2029                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2030                 }
2031
2032 parse_soc_bounding_box:
2033                 /*
2034                  * soc bounding box info is not integrated in the discovery table,
2035                  * so it always needs to be parsed from the gpu info firmware when needed.
2036                  */
2037                 if (hdr->version_minor == 2) {
2038                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2039                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2040                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2041                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2042                 }
2043                 break;
2044         }
2045         default:
2046                 dev_err(adev->dev,
2047                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2048                 err = -EINVAL;
2049                 goto out;
2050         }
2051 out:
2052         return err;
2053 }
2054
2055 /**
2056  * amdgpu_device_ip_early_init - run early init for hardware IPs
2057  *
2058  * @adev: amdgpu_device pointer
2059  *
2060  * Early initialization pass for hardware IPs.  The hardware IPs that make
2061  * up each asic are discovered and each IP's early_init callback is run.  This
2062  * is the first stage in initializing the asic.
2063  * Returns 0 on success, negative error code on failure.
2064  */
2065 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2066 {
2067         struct drm_device *dev = adev_to_drm(adev);
2068         struct pci_dev *parent;
2069         int i, r;
2070
2071         amdgpu_device_enable_virtual_display(adev);
2072
2073         if (amdgpu_sriov_vf(adev)) {
2074                 r = amdgpu_virt_request_full_gpu(adev, true);
2075                 if (r)
2076                         return r;
2077         }
2078
2079         switch (adev->asic_type) {
2080 #ifdef CONFIG_DRM_AMDGPU_SI
2081         case CHIP_VERDE:
2082         case CHIP_TAHITI:
2083         case CHIP_PITCAIRN:
2084         case CHIP_OLAND:
2085         case CHIP_HAINAN:
2086                 adev->family = AMDGPU_FAMILY_SI;
2087                 r = si_set_ip_blocks(adev);
2088                 if (r)
2089                         return r;
2090                 break;
2091 #endif
2092 #ifdef CONFIG_DRM_AMDGPU_CIK
2093         case CHIP_BONAIRE:
2094         case CHIP_HAWAII:
2095         case CHIP_KAVERI:
2096         case CHIP_KABINI:
2097         case CHIP_MULLINS:
2098                 if (adev->flags & AMD_IS_APU)
2099                         adev->family = AMDGPU_FAMILY_KV;
2100                 else
2101                         adev->family = AMDGPU_FAMILY_CI;
2102
2103                 r = cik_set_ip_blocks(adev);
2104                 if (r)
2105                         return r;
2106                 break;
2107 #endif
2108         case CHIP_TOPAZ:
2109         case CHIP_TONGA:
2110         case CHIP_FIJI:
2111         case CHIP_POLARIS10:
2112         case CHIP_POLARIS11:
2113         case CHIP_POLARIS12:
2114         case CHIP_VEGAM:
2115         case CHIP_CARRIZO:
2116         case CHIP_STONEY:
2117                 if (adev->flags & AMD_IS_APU)
2118                         adev->family = AMDGPU_FAMILY_CZ;
2119                 else
2120                         adev->family = AMDGPU_FAMILY_VI;
2121
2122                 r = vi_set_ip_blocks(adev);
2123                 if (r)
2124                         return r;
2125                 break;
2126         default:
2127                 r = amdgpu_discovery_set_ip_blocks(adev);
2128                 if (r)
2129                         return r;
2130                 break;
2131         }
2132
2133         if (amdgpu_has_atpx() &&
2134             (amdgpu_is_atpx_hybrid() ||
2135              amdgpu_has_atpx_dgpu_power_cntl()) &&
2136             ((adev->flags & AMD_IS_APU) == 0) &&
2137             !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2138                 adev->flags |= AMD_IS_PX;
2139
2140         if (!(adev->flags & AMD_IS_APU)) {
2141                 parent = pci_upstream_bridge(adev->pdev);
2142                 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2143         }
2144
2145         amdgpu_amdkfd_device_probe(adev);
2146
2147         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2148         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2149                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2150         if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2151                 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2152
2153         for (i = 0; i < adev->num_ip_blocks; i++) {
2154                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2155                         DRM_ERROR("disabled ip block: %d <%s>\n",
2156                                   i, adev->ip_blocks[i].version->funcs->name);
2157                         adev->ip_blocks[i].status.valid = false;
2158                 } else {
2159                         if (adev->ip_blocks[i].version->funcs->early_init) {
2160                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2161                                 if (r == -ENOENT) {
2162                                         adev->ip_blocks[i].status.valid = false;
2163                                 } else if (r) {
2164                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2165                                                   adev->ip_blocks[i].version->funcs->name, r);
2166                                         return r;
2167                                 } else {
2168                                         adev->ip_blocks[i].status.valid = true;
2169                                 }
2170                         } else {
2171                                 adev->ip_blocks[i].status.valid = true;
2172                         }
2173                 }
2174                 /* get the vbios after the asic_funcs are set up */
2175                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2176                         r = amdgpu_device_parse_gpu_info_fw(adev);
2177                         if (r)
2178                                 return r;
2179
2180                         /* Read BIOS */
2181                         if (!amdgpu_get_bios(adev))
2182                                 return -EINVAL;
2183
2184                         r = amdgpu_atombios_init(adev);
2185                         if (r) {
2186                                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2187                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2188                                 return r;
2189                         }
2190
2191                         /* get pf2vf msg info at its earliest time */
2192                         if (amdgpu_sriov_vf(adev))
2193                                 amdgpu_virt_init_data_exchange(adev);
2194
2195                 }
2196         }
2197
2198         adev->cg_flags &= amdgpu_cg_mask;
2199         adev->pg_flags &= amdgpu_pg_mask;
2200
2201         return 0;
2202 }
2203
2204 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2205 {
2206         int i, r;
2207
2208         for (i = 0; i < adev->num_ip_blocks; i++) {
2209                 if (!adev->ip_blocks[i].status.sw)
2210                         continue;
2211                 if (adev->ip_blocks[i].status.hw)
2212                         continue;
2213                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2214                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2215                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2216                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2217                         if (r) {
2218                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2219                                           adev->ip_blocks[i].version->funcs->name, r);
2220                                 return r;
2221                         }
2222                         adev->ip_blocks[i].status.hw = true;
2223                 }
2224         }
2225
2226         return 0;
2227 }
2228
2229 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2230 {
2231         int i, r;
2232
2233         for (i = 0; i < adev->num_ip_blocks; i++) {
2234                 if (!adev->ip_blocks[i].status.sw)
2235                         continue;
2236                 if (adev->ip_blocks[i].status.hw)
2237                         continue;
2238                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2239                 if (r) {
2240                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2241                                   adev->ip_blocks[i].version->funcs->name, r);
2242                         return r;
2243                 }
2244                 adev->ip_blocks[i].status.hw = true;
2245         }
2246
2247         return 0;
2248 }
2249
2250 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2251 {
2252         int r = 0;
2253         int i;
2254         uint32_t smu_version;
2255
2256         if (adev->asic_type >= CHIP_VEGA10) {
2257                 for (i = 0; i < adev->num_ip_blocks; i++) {
2258                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2259                                 continue;
2260
2261                         if (!adev->ip_blocks[i].status.sw)
2262                                 continue;
2263
2264                         /* no need to do the fw loading again if already done */
2265                         if (adev->ip_blocks[i].status.hw == true)
2266                                 break;
2267
2268                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2269                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2270                                 if (r) {
2271                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2272                                                           adev->ip_blocks[i].version->funcs->name, r);
2273                                         return r;
2274                                 }
2275                         } else {
2276                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2277                                 if (r) {
2278                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2279                                                           adev->ip_blocks[i].version->funcs->name, r);
2280                                         return r;
2281                                 }
2282                         }
2283
2284                         adev->ip_blocks[i].status.hw = true;
2285                         break;
2286                 }
2287         }
2288
2289         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2290                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2291
2292         return r;
2293 }
2294
2295 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2296 {
2297         long timeout;
2298         int r, i;
2299
2300         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2301                 struct amdgpu_ring *ring = adev->rings[i];
2302
2303                 /* No need to set up the GPU scheduler for rings that don't need it */
2304                 if (!ring || ring->no_scheduler)
2305                         continue;
2306
2307                 switch (ring->funcs->type) {
2308                 case AMDGPU_RING_TYPE_GFX:
2309                         timeout = adev->gfx_timeout;
2310                         break;
2311                 case AMDGPU_RING_TYPE_COMPUTE:
2312                         timeout = adev->compute_timeout;
2313                         break;
2314                 case AMDGPU_RING_TYPE_SDMA:
2315                         timeout = adev->sdma_timeout;
2316                         break;
2317                 default:
2318                         timeout = adev->video_timeout;
2319                         break;
2320                 }
2321
2322                 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2323                                    ring->num_hw_submission, amdgpu_job_hang_limit,
2324                                    timeout, adev->reset_domain->wq,
2325                                    ring->sched_score, ring->name,
2326                                    adev->dev);
2327                 if (r) {
2328                         DRM_ERROR("Failed to create scheduler on ring %s.\n",
2329                                   ring->name);
2330                         return r;
2331                 }
2332         }
2333
2334         return 0;
2335 }
2336
2337
2338 /**
2339  * amdgpu_device_ip_init - run init for hardware IPs
2340  *
2341  * @adev: amdgpu_device pointer
2342  *
2343  * Main initialization pass for hardware IPs.  The list of all the hardware
2344  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2345  * are run.  sw_init initializes the software state associated with each IP
2346  * and hw_init initializes the hardware associated with each IP.
2347  * Returns 0 on success, negative error code on failure.
2348  */
2349 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2350 {
2351         int i, r;
2352
2353         r = amdgpu_ras_init(adev);
2354         if (r)
2355                 return r;
2356
2357         for (i = 0; i < adev->num_ip_blocks; i++) {
2358                 if (!adev->ip_blocks[i].status.valid)
2359                         continue;
2360                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2361                 if (r) {
2362                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2363                                   adev->ip_blocks[i].version->funcs->name, r);
2364                         goto init_failed;
2365                 }
2366                 adev->ip_blocks[i].status.sw = true;
2367
2368                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2369                         /* need to do common hw init early so everything is set up for gmc */
2370                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2371                         if (r) {
2372                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2373                                 goto init_failed;
2374                         }
2375                         adev->ip_blocks[i].status.hw = true;
2376                 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2377                         /* need to do gmc hw init early so we can allocate gpu mem */
2378                         /* Try to reserve bad pages early */
2379                         if (amdgpu_sriov_vf(adev))
2380                                 amdgpu_virt_exchange_data(adev);
2381
2382                         r = amdgpu_device_vram_scratch_init(adev);
2383                         if (r) {
2384                                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2385                                 goto init_failed;
2386                         }
2387                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2388                         if (r) {
2389                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2390                                 goto init_failed;
2391                         }
2392                         r = amdgpu_device_wb_init(adev);
2393                         if (r) {
2394                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2395                                 goto init_failed;
2396                         }
2397                         adev->ip_blocks[i].status.hw = true;
2398
2399                         /* right after GMC hw init, we create CSA */
2400                         if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2401                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2402                                                                 AMDGPU_GEM_DOMAIN_VRAM,
2403                                                                 AMDGPU_CSA_SIZE);
2404                                 if (r) {
2405                                         DRM_ERROR("allocate CSA failed %d\n", r);
2406                                         goto init_failed;
2407                                 }
2408                         }
2409                 }
2410         }
2411
2412         if (amdgpu_sriov_vf(adev))
2413                 amdgpu_virt_init_data_exchange(adev);
2414
2415         r = amdgpu_ib_pool_init(adev);
2416         if (r) {
2417                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2418                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2419                 goto init_failed;
2420         }
2421
2422         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
2423         if (r)
2424                 goto init_failed;
2425
2426         r = amdgpu_device_ip_hw_init_phase1(adev);
2427         if (r)
2428                 goto init_failed;
2429
2430         r = amdgpu_device_fw_loading(adev);
2431         if (r)
2432                 goto init_failed;
2433
2434         r = amdgpu_device_ip_hw_init_phase2(adev);
2435         if (r)
2436                 goto init_failed;
2437
2438         /*
2439          * Retired pages will be loaded from eeprom and reserved here.
2440          * This should be called after amdgpu_device_ip_hw_init_phase2, since
2441          * for some ASICs the RAS EEPROM code relies on the SMU being fully
2442          * functional for I2C communication, which is only true at this point.
2443          *
2444          * amdgpu_ras_recovery_init may fail, but the caller only cares about
2445          * failures caused by a bad gpu state and stops the amdgpu init process
2446          * accordingly. For other failures, it still releases all the resources
2447          * and prints an error message rather than returning a negative value
2448          * to the upper level.
2449          *
2450          * Note: theoretically, this should be called before all vram allocations
2451          * to protect the retired pages from being abused.
2452          */
2453         r = amdgpu_ras_recovery_init(adev);
2454         if (r)
2455                 goto init_failed;
2456
2457         /*
2458          * In case of XGMI, grab an extra reference on the reset domain for this device
2459          */
2460         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2461                 if (amdgpu_xgmi_add_device(adev) == 0) {
2462                         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2463
2464                         if (!hive->reset_domain ||
2465                             !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2466                                 r = -ENOENT;
2467                                 amdgpu_put_xgmi_hive(hive);
2468                                 goto init_failed;
2469                         }
2470
2471                         /* Drop the early temporary reset domain we created for device */
2472                         amdgpu_reset_put_reset_domain(adev->reset_domain);
2473                         adev->reset_domain = hive->reset_domain;
2474                         amdgpu_put_xgmi_hive(hive);
2475                 }
2476         }
2477
2478         r = amdgpu_device_init_schedulers(adev);
2479         if (r)
2480                 goto init_failed;
2481
2482         /* Don't init kfd if the whole hive needs to be reset during init */
2483         if (!adev->gmc.xgmi.pending_reset)
2484                 amdgpu_amdkfd_device_init(adev);
2485
2486         amdgpu_fru_get_product_info(adev);
2487
2488 init_failed:
2489         if (amdgpu_sriov_vf(adev))
2490                 amdgpu_virt_release_full_gpu(adev, true);
2491
2492         return r;
2493 }
2494
2495 /**
2496  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2497  *
2498  * @adev: amdgpu_device pointer
2499  *
2500  * Records the reset magic value from the gart pointer in VRAM.  The driver
2501  * calls this function before a GPU reset.  If the value is retained after a
2502  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2503  */
2504 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2505 {
2506         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2507 }
2508
2509 /**
2510  * amdgpu_device_check_vram_lost - check if vram is valid
2511  *
2512  * @adev: amdgpu_device pointer
2513  *
2514  * Checks the reset magic value written to the gart pointer in VRAM.
2515  * The driver calls this after a GPU reset to see if the contents of
2516  * VRAM have been lost or not.
2517  * Returns true if vram is lost, false if not.
2518  */
2519 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2520 {
2521         if (memcmp(adev->gart.ptr, adev->reset_magic,
2522                         AMDGPU_RESET_MAGIC_NUM))
2523                 return true;
2524
2525         if (!amdgpu_in_reset(adev))
2526                 return false;
2527
2528         /*
2529          * For all ASICs with baco/mode1 reset, the VRAM is
2530          * always assumed to be lost.
2531          */
2532         switch (amdgpu_asic_reset_method(adev)) {
2533         case AMD_RESET_METHOD_BACO:
2534         case AMD_RESET_METHOD_MODE1:
2535                 return true;
2536         default:
2537                 return false;
2538         }
2539 }
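
/*
 * Illustrative pairing of the two helpers above (sketch, not code taken from
 * the reset path): the magic is recorded before the asic reset is asserted
 * and checked afterwards to decide whether VRAM contents must be restored.
 *
 *	amdgpu_device_fill_reset_magic(adev);
 *	... perform the asic reset ...
 *	if (amdgpu_device_check_vram_lost(adev))
 *		... restore or re-validate VRAM contents ...
 */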
2540
2541 /**
2542  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2543  *
2544  * @adev: amdgpu_device pointer
2545  * @state: clockgating state (gate or ungate)
2546  *
2547  * The list of all the hardware IPs that make up the asic is walked and the
2548  * set_clockgating_state callbacks are run.
2549  * During the late init pass this enables clockgating for hardware IPs;
2550  * during fini or suspend it disables clockgating.
2551  * Returns 0 on success, negative error code on failure.
2552  */
2553
2554 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2555                                enum amd_clockgating_state state)
2556 {
2557         int i, j, r;
2558
2559         if (amdgpu_emu_mode == 1)
2560                 return 0;
2561
2562         for (j = 0; j < adev->num_ip_blocks; j++) {
2563                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2564                 if (!adev->ip_blocks[i].status.late_initialized)
2565                         continue;
2566                 /* skip CG for GFX on S0ix */
2567                 if (adev->in_s0ix &&
2568                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2569                         continue;
2570                 /* skip CG for VCE/UVD, it's handled specially */
2571                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2572                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2573                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2574                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2575                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2576                         /* enable clockgating to save power */
2577                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2578                                                                                      state);
2579                         if (r) {
2580                                 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2581                                           adev->ip_blocks[i].version->funcs->name, r);
2582                                 return r;
2583                         }
2584                 }
2585         }
2586
2587         return 0;
2588 }
2589
2590 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2591                                enum amd_powergating_state state)
2592 {
2593         int i, j, r;
2594
2595         if (amdgpu_emu_mode == 1)
2596                 return 0;
2597
2598         for (j = 0; j < adev->num_ip_blocks; j++) {
2599                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2600                 if (!adev->ip_blocks[i].status.late_initialized)
2601                         continue;
2602                 /* skip PG for GFX on S0ix */
2603                 if (adev->in_s0ix &&
2604                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2605                         continue;
2606                 /* skip PG for VCE/UVD, it's handled specially */
2607                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2608                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2609                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2610                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2611                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2612                         /* enable powergating to save power */
2613                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2614                                                                                         state);
2615                         if (r) {
2616                                 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2617                                           adev->ip_blocks[i].version->funcs->name, r);
2618                                 return r;
2619                         }
2620                 }
2621         }
2622         return 0;
2623 }
2624
2625 static int amdgpu_device_enable_mgpu_fan_boost(void)
2626 {
2627         struct amdgpu_gpu_instance *gpu_ins;
2628         struct amdgpu_device *adev;
2629         int i, ret = 0;
2630
2631         mutex_lock(&mgpu_info.mutex);
2632
2633         /*
2634          * MGPU fan boost feature should be enabled
2635          * only when there are two or more dGPUs in
2636          * the system
2637          */
2638         if (mgpu_info.num_dgpu < 2)
2639                 goto out;
2640
2641         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2642                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2643                 adev = gpu_ins->adev;
2644                 if (!(adev->flags & AMD_IS_APU) &&
2645                     !gpu_ins->mgpu_fan_enabled) {
2646                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2647                         if (ret)
2648                                 break;
2649
2650                         gpu_ins->mgpu_fan_enabled = 1;
2651                 }
2652         }
2653
2654 out:
2655         mutex_unlock(&mgpu_info.mutex);
2656
2657         return ret;
2658 }
2659
2660 /**
2661  * amdgpu_device_ip_late_init - run late init for hardware IPs
2662  *
2663  * @adev: amdgpu_device pointer
2664  *
2665  * Late initialization pass for hardware IPs.  The list of all the hardware
2666  * IPs that make up the asic is walked and the late_init callbacks are run.
2667  * late_init covers any special initialization that an IP requires
2668  * after all of the IPs have been initialized or something that needs to happen
2669  * late in the init process.
2670  * Returns 0 on success, negative error code on failure.
2671  */
2672 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2673 {
2674         struct amdgpu_gpu_instance *gpu_instance;
2675         int i = 0, r;
2676
2677         for (i = 0; i < adev->num_ip_blocks; i++) {
2678                 if (!adev->ip_blocks[i].status.hw)
2679                         continue;
2680                 if (adev->ip_blocks[i].version->funcs->late_init) {
2681                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2682                         if (r) {
2683                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2684                                           adev->ip_blocks[i].version->funcs->name, r);
2685                                 return r;
2686                         }
2687                 }
2688                 adev->ip_blocks[i].status.late_initialized = true;
2689         }
2690
2691         r = amdgpu_ras_late_init(adev);
2692         if (r) {
2693                 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2694                 return r;
2695         }
2696
2697         amdgpu_ras_set_error_query_ready(adev, true);
2698
2699         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2700         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2701
2702         amdgpu_device_fill_reset_magic(adev);
2703
2704         r = amdgpu_device_enable_mgpu_fan_boost();
2705         if (r)
2706                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2707
2708         /* For passthrough configuration on arcturus and aldebaran, enable special handling of SBR */
2709         if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1)||
2710                                adev->asic_type == CHIP_ALDEBARAN ))
2711                 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2712
2713         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2714                 mutex_lock(&mgpu_info.mutex);
2715
2716                 /*
2717                  * Reset device p-state to low as this was booted with high.
2718                  *
2719                  * This should be performed only after all devices from the same
2720                  * hive get initialized.
2721                  *
2722                  * However, the number of devices in a hive is not known in
2723                  * advance; it is counted one by one as the devices initialize.
2724                  *
2725                  * So, wait for all XGMI interlinked devices to be initialized.
2726                  * This may bring some delay, as those devices may come from
2727                  * different hives. But that should be OK.
2728                  */
2729                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2730                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2731                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2732                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2733                                         continue;
2734
2735                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2736                                                 AMDGPU_XGMI_PSTATE_MIN);
2737                                 if (r) {
2738                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2739                                         break;
2740                                 }
2741                         }
2742                 }
2743
2744                 mutex_unlock(&mgpu_info.mutex);
2745         }
2746
2747         return 0;
2748 }
2749
2750 /**
2751  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2752  *
2753  * @adev: amdgpu_device pointer
2754  *
2755  * For ASICs that need to disable SMC first
2756  */
2757 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2758 {
2759         int i, r;
2760
2761         if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2762                 return;
2763
2764         for (i = 0; i < adev->num_ip_blocks; i++) {
2765                 if (!adev->ip_blocks[i].status.hw)
2766                         continue;
2767                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2768                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2769                         /* XXX handle errors */
2770                         if (r) {
2771                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2772                                           adev->ip_blocks[i].version->funcs->name, r);
2773                         }
2774                         adev->ip_blocks[i].status.hw = false;
2775                         break;
2776                 }
2777         }
2778 }
2779
2780 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2781 {
2782         int i, r;
2783
2784         for (i = 0; i < adev->num_ip_blocks; i++) {
2785                 if (!adev->ip_blocks[i].version->funcs->early_fini)
2786                         continue;
2787
2788                 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2789                 if (r) {
2790                         DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2791                                   adev->ip_blocks[i].version->funcs->name, r);
2792                 }
2793         }
2794
2795         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2796         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2797
2798         amdgpu_amdkfd_suspend(adev, false);
2799
2800         /* Workaround for ASICs that need to disable SMC first */
2801         amdgpu_device_smu_fini_early(adev);
2802
2803         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2804                 if (!adev->ip_blocks[i].status.hw)
2805                         continue;
2806
2807                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2808                 /* XXX handle errors */
2809                 if (r) {
2810                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2811                                   adev->ip_blocks[i].version->funcs->name, r);
2812                 }
2813
2814                 adev->ip_blocks[i].status.hw = false;
2815         }
2816
2817         if (amdgpu_sriov_vf(adev)) {
2818                 if (amdgpu_virt_release_full_gpu(adev, false))
2819                         DRM_ERROR("failed to release exclusive mode on fini\n");
2820         }
2821
2822         return 0;
2823 }
2824
2825 /**
2826  * amdgpu_device_ip_fini - run fini for hardware IPs
2827  *
2828  * @adev: amdgpu_device pointer
2829  *
2830  * Main teardown pass for hardware IPs.  The list of all the hardware
2831  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2832  * are run.  hw_fini tears down the hardware associated with each IP
2833  * and sw_fini tears down any software state associated with each IP.
2834  * Returns 0 on success, negative error code on failure.
2835  */
2836 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2837 {
2838         int i, r;
2839
2840         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2841                 amdgpu_virt_release_ras_err_handler_data(adev);
2842
2843         if (adev->gmc.xgmi.num_physical_nodes > 1)
2844                 amdgpu_xgmi_remove_device(adev);
2845
2846         amdgpu_amdkfd_device_fini_sw(adev);
2847
2848         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2849                 if (!adev->ip_blocks[i].status.sw)
2850                         continue;
2851
2852                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2853                         amdgpu_ucode_free_bo(adev);
2854                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2855                         amdgpu_device_wb_fini(adev);
2856                         amdgpu_device_vram_scratch_fini(adev);
2857                         amdgpu_ib_pool_fini(adev);
2858                 }
2859
2860                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2861                 /* XXX handle errors */
2862                 if (r) {
2863                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2864                                   adev->ip_blocks[i].version->funcs->name, r);
2865                 }
2866                 adev->ip_blocks[i].status.sw = false;
2867                 adev->ip_blocks[i].status.valid = false;
2868         }
2869
2870         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2871                 if (!adev->ip_blocks[i].status.late_initialized)
2872                         continue;
2873                 if (adev->ip_blocks[i].version->funcs->late_fini)
2874                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2875                 adev->ip_blocks[i].status.late_initialized = false;
2876         }
2877
2878         amdgpu_ras_fini(adev);
2879
2880         return 0;
2881 }
2882
2883 /**
2884  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2885  *
2886  * @work: work_struct.
2887  */
2888 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2889 {
2890         struct amdgpu_device *adev =
2891                 container_of(work, struct amdgpu_device, delayed_init_work.work);
2892         int r;
2893
2894         r = amdgpu_ib_ring_tests(adev);
2895         if (r)
2896                 DRM_ERROR("ib ring test failed (%d).\n", r);
2897 }
2898
2899 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2900 {
2901         struct amdgpu_device *adev =
2902                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2903
2904         WARN_ON_ONCE(adev->gfx.gfx_off_state);
2905         WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2906
2907         if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2908                 adev->gfx.gfx_off_state = true;
2909 }
2910
2911 /**
2912  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2913  *
2914  * @adev: amdgpu_device pointer
2915  *
2916  * Main suspend function for hardware IPs (phase 1).  The list of all the
2917  * hardware IPs that make up the asic is walked, clockgating is disabled
2918  * and the suspend callbacks are run.  Phase 1 only suspends the display
2919  * (DCE) IPs; everything else is handled in phase 2.
2920  * Returns 0 on success, negative error code on failure.
2921  */
2922 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2923 {
2924         int i, r;
2925
2926         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2927         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2928
2929         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2930                 if (!adev->ip_blocks[i].status.valid)
2931                         continue;
2932
2933                 /* displays are handled separately */
2934                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2935                         continue;
2936
2937                 /* XXX handle errors */
2938                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2939                 /* XXX handle errors */
2940                 if (r) {
2941                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2942                                   adev->ip_blocks[i].version->funcs->name, r);
2943                         return r;
2944                 }
2945
2946                 adev->ip_blocks[i].status.hw = false;
2947         }
2948
2949         return 0;
2950 }
2951
2952 /**
2953  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2954  *
2955  * @adev: amdgpu_device pointer
2956  *
2957  * Main suspend function for hardware IPs.  The list of all the hardware
2958  * IPs that make up the asic is walked, clockgating is disabled and the
2959  * suspend callbacks are run.  suspend puts the hardware and software state
2960  * in each IP into a state suitable for suspend.
2961  * Returns 0 on success, negative error code on failure.
2962  */
2963 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2964 {
2965         int i, r;
2966
2967         if (adev->in_s0ix)
2968                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2969
2970         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2971                 if (!adev->ip_blocks[i].status.valid)
2972                         continue;
2973                 /* displays are handled in phase1 */
2974                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2975                         continue;
2976                 /* PSP lost connection when err_event_athub occurs */
2977                 if (amdgpu_ras_intr_triggered() &&
2978                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2979                         adev->ip_blocks[i].status.hw = false;
2980                         continue;
2981                 }
2982
2983                 /* skip unnecessary suspend if we have not initialized them yet */
2984                 if (adev->gmc.xgmi.pending_reset &&
2985                     !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2986                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2987                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2988                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2989                         adev->ip_blocks[i].status.hw = false;
2990                         continue;
2991                 }
2992
2993                 /* skip suspend of gfx and psp for S0ix
2994                  * gfx is in gfxoff state, so on resume it will exit gfxoff just
2995                  * like at runtime. PSP is also part of the always on hardware
2996                  * so no need to suspend it.
2997                  */
2998                 if (adev->in_s0ix &&
2999                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3000                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
3001                         continue;
3002
3003                 /* XXX handle errors */
3004                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3005                 /* XXX handle errors */
3006                 if (r) {
3007                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
3008                                   adev->ip_blocks[i].version->funcs->name, r);
3009                 }
3010                 adev->ip_blocks[i].status.hw = false;
3011                 /* handle putting the SMC in the appropriate state */
3012                 if (!amdgpu_sriov_vf(adev)) {
3013                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3014                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3015                                 if (r) {
3016                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3017                                                         adev->mp1_state, r);
3018                                         return r;
3019                                 }
3020                         }
3021                 }
3022         }
3023
3024         return 0;
3025 }
3026
3027 /**
3028  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3029  *
3030  * @adev: amdgpu_device pointer
3031  *
3032  * Main suspend function for hardware IPs.  The list of all the hardware
3033  * IPs that make up the asic is walked, clockgating is disabled and the
3034  * suspend callbacks are run.  suspend puts the hardware and software state
3035  * in each IP into a state suitable for suspend.
3036  * Returns 0 on success, negative error code on failure.
3037  */
3038 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3039 {
3040         int r;
3041
3042         if (amdgpu_sriov_vf(adev)) {
3043                 amdgpu_virt_fini_data_exchange(adev);
3044                 amdgpu_virt_request_full_gpu(adev, false);
3045         }
3046
3047         r = amdgpu_device_ip_suspend_phase1(adev);
3048         if (r)
3049                 return r;
3050         r = amdgpu_device_ip_suspend_phase2(adev);
3051
3052         if (amdgpu_sriov_vf(adev))
3053                 amdgpu_virt_release_full_gpu(adev, false);
3054
3055         return r;
3056 }
3057
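/*
 * amdgpu_device_ip_reinit_early_sriov - re-init the basic hardware IPs for SR-IOV
 *
 * Re-runs hw_init for the COMMON, GMC, PSP and IH blocks in that fixed order,
 * clearing and then re-setting their hw status.  Used when re-initializing the
 * ASIC as an SR-IOV virtual function, e.g. as part of a VF reset.
 */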
3058 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3059 {
3060         int i, r;
3061
3062         static enum amd_ip_block_type ip_order[] = {
3063                 AMD_IP_BLOCK_TYPE_COMMON,
3064                 AMD_IP_BLOCK_TYPE_GMC,
3065                 AMD_IP_BLOCK_TYPE_PSP,
3066                 AMD_IP_BLOCK_TYPE_IH,
3067         };
3068
3069         for (i = 0; i < adev->num_ip_blocks; i++) {
3070                 int j;
3071                 struct amdgpu_ip_block *block;
3072
3073                 block = &adev->ip_blocks[i];
3074                 block->status.hw = false;
3075
3076                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3077
3078                         if (block->version->type != ip_order[j] ||
3079                                 !block->status.valid)
3080                                 continue;
3081
3082                         r = block->version->funcs->hw_init(adev);
3083                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3084                         if (r)
3085                                 return r;
3086                         block->status.hw = true;
3087                 }
3088         }
3089
3090         return 0;
3091 }
3092
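/*
 * amdgpu_device_ip_reinit_late_sriov - re-init the remaining hardware IPs for SR-IOV
 *
 * Re-initializes the remaining blocks in a fixed order (SMC, DCE, GFX, SDMA,
 * UVD, VCE, VCN).  The SMC is brought back via its resume callback, all other
 * blocks via hw_init.
 */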
3093 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3094 {
3095         int i, r;
3096
3097         static enum amd_ip_block_type ip_order[] = {
3098                 AMD_IP_BLOCK_TYPE_SMC,
3099                 AMD_IP_BLOCK_TYPE_DCE,
3100                 AMD_IP_BLOCK_TYPE_GFX,
3101                 AMD_IP_BLOCK_TYPE_SDMA,
3102                 AMD_IP_BLOCK_TYPE_UVD,
3103                 AMD_IP_BLOCK_TYPE_VCE,
3104                 AMD_IP_BLOCK_TYPE_VCN
3105         };
3106
3107         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3108                 int j;
3109                 struct amdgpu_ip_block *block;
3110
3111                 for (j = 0; j < adev->num_ip_blocks; j++) {
3112                         block = &adev->ip_blocks[j];
3113
3114                         if (block->version->type != ip_order[i] ||
3115                                 !block->status.valid ||
3116                                 block->status.hw)
3117                                 continue;
3118
3119                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3120                                 r = block->version->funcs->resume(adev);
3121                         else
3122                                 r = block->version->funcs->hw_init(adev);
3123
3124                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3125                         if (r)
3126                                 return r;
3127                         block->status.hw = true;
3128                 }
3129         }
3130
3131         return 0;
3132 }
3133
3134 /**
3135  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3136  *
3137  * @adev: amdgpu_device pointer
3138  *
3139  * First resume function for hardware IPs.  The list of all the hardware
3140  * IPs that make up the asic is walked and the resume callbacks are run for
3141  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3142  * after a suspend and updates the software state as necessary.  This
3143  * function is also used for restoring the GPU after a GPU reset.
3144  * Returns 0 on success, negative error code on failure.
3145  */
3146 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3147 {
3148         int i, r;
3149
3150         for (i = 0; i < adev->num_ip_blocks; i++) {
3151                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3152                         continue;
3153                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3154                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3155                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
3156
3157                         r = adev->ip_blocks[i].version->funcs->resume(adev);
3158                         if (r) {
3159                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
3160                                           adev->ip_blocks[i].version->funcs->name, r);
3161                                 return r;
3162                         }
3163                         adev->ip_blocks[i].status.hw = true;
3164                 }
3165         }
3166
3167         return 0;
3168 }
3169
3170 /**
3171  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3172  *
3173  * @adev: amdgpu_device pointer
3174  *
3175  * Second resume function for hardware IPs.  The list of all the hardware
3176  * IPs that make up the asic is walked and the resume callbacks are run for
3177  * all blocks except COMMON, GMC, IH, and PSP.  resume puts the hardware into a
3178  * functional state after a suspend and updates the software state as
3179  * necessary.  This function is also used for restoring the GPU after a GPU
3180  * reset.
3181  * Returns 0 on success, negative error code on failure.
3182  */
3183 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3184 {
3185         int i, r;
3186
3187         for (i = 0; i < adev->num_ip_blocks; i++) {
3188                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3189                         continue;
3190                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3191                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3192                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3193                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3194                         continue;
3195                 r = adev->ip_blocks[i].version->funcs->resume(adev);
3196                 if (r) {
3197                         DRM_ERROR("resume of IP block <%s> failed %d\n",
3198                                   adev->ip_blocks[i].version->funcs->name, r);
3199                         return r;
3200                 }
3201                 adev->ip_blocks[i].status.hw = true;
3202         }
3203
3204         return 0;
3205 }
3206
3207 /**
3208  * amdgpu_device_ip_resume - run resume for hardware IPs
3209  *
3210  * @adev: amdgpu_device pointer
3211  *
3212  * Main resume function for hardware IPs.  The hardware IPs
3213  * are split into two resume functions because they are
3214  * also used in recovering from a GPU reset and some additional
3215  * steps need to be taken between them.  In this case (S3/S4) they are
3216  * run sequentially.
3217  * Returns 0 on success, negative error code on failure.
3218  */
3219 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3220 {
3221         int r;
3222
3223         r = amdgpu_amdkfd_resume_iommu(adev);
3224         if (r)
3225                 return r;
3226
3227         r = amdgpu_device_ip_resume_phase1(adev);
3228         if (r)
3229                 return r;
3230
3231         r = amdgpu_device_fw_loading(adev);
3232         if (r)
3233                 return r;
3234
3235         r = amdgpu_device_ip_resume_phase2(adev);
3236
3237         return r;
3238 }
3239
3240 /**
3241  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3242  *
3243  * @adev: amdgpu_device pointer
3244  *
3245  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3246  */
3247 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3248 {
3249         if (amdgpu_sriov_vf(adev)) {
3250                 if (adev->is_atom_fw) {
3251                         if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3252                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3253                 } else {
3254                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3255                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3256                 }
3257
3258                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3259                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3260         }
3261 }
3262
3263 /**
3264  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3265  *
3266  * @asic_type: AMD asic type
3267  *
3268  * Check if there is DC (new modesetting infrastructure) support for an asic.
3269  * returns true if DC has support, false if not.
3270  */
3271 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3272 {
3273         switch (asic_type) {
3274 #ifdef CONFIG_DRM_AMDGPU_SI
3275         case CHIP_HAINAN:
3276 #endif
3277         case CHIP_TOPAZ:
3278                 /* chips with no display hardware */
3279                 return false;
3280 #if defined(CONFIG_DRM_AMD_DC)
3281         case CHIP_TAHITI:
3282         case CHIP_PITCAIRN:
3283         case CHIP_VERDE:
3284         case CHIP_OLAND:
3285                 /*
3286                  * We have systems in the wild with these ASICs that require
3287                  * LVDS and VGA support which is not supported with DC.
3288                  *
3289                  * Fall back to the non-DC driver here by default so as not to
3290                  * cause regressions.
3291                  */
3292 #if defined(CONFIG_DRM_AMD_DC_SI)
3293                 return amdgpu_dc > 0;
3294 #else
3295                 return false;
3296 #endif
3297         case CHIP_BONAIRE:
3298         case CHIP_KAVERI:
3299         case CHIP_KABINI:
3300         case CHIP_MULLINS:
3301                 /*
3302                  * We have systems in the wild with these ASICs that require
3303                  * VGA support which is not supported with DC.
3304                  *
3305                  * Fall back to the non-DC driver here by default so as not to
3306                  * cause regressions.
3307                  */
3308                 return amdgpu_dc > 0;
3309         default:
3310                 return amdgpu_dc != 0;
3311 #else
3312         default:
3313                 if (amdgpu_dc > 0)
3314                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3315                                          "but isn't supported by ASIC, ignoring\n");
3316                 return false;
3317 #endif
3318         }
3319 }
3320
3321 /**
3322  * amdgpu_device_has_dc_support - check if dc is supported
3323  *
3324  * @adev: amdgpu_device pointer
3325  *
3326  * Returns true for supported, false for not supported
3327  */
3328 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3329 {
3330         if (amdgpu_sriov_vf(adev) ||
3331             adev->enable_virtual_display ||
3332             (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3333                 return false;
3334
3335         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3336 }
3337
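/*
 * amdgpu_device_xgmi_reset_func - per-device reset work for an XGMI hive
 *
 * Work handler behind xgmi_reset_work.  Resets this device as part of a
 * hive-wide reset, using the hive task barrier so that the BACO enter/exit
 * (or the full ASIC reset) stays in lockstep with the other hive members.
 */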
3338 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3339 {
3340         struct amdgpu_device *adev =
3341                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3342         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3343
3344         /* It's a bug to not have a hive within this function */
3345         if (WARN_ON(!hive))
3346                 return;
3347
3348         /*
3349          * Use task barrier to synchronize all xgmi reset works across the
3350          * hive. task_barrier_enter and task_barrier_exit will block
3351          * until all the threads running the xgmi reset works reach
3352          * those points. task_barrier_full will do both blocks.
3353          */
3354         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3355
3356                 task_barrier_enter(&hive->tb);
3357                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3358
3359                 if (adev->asic_reset_res)
3360                         goto fail;
3361
3362                 task_barrier_exit(&hive->tb);
3363                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3364
3365                 if (adev->asic_reset_res)
3366                         goto fail;
3367
3368                 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3369                     adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3370                         adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3371         } else {
3372
3373                 task_barrier_full(&hive->tb);
3374                 adev->asic_reset_res =  amdgpu_asic_reset(adev);
3375         }
3376
3377 fail:
3378         if (adev->asic_reset_res)
3379                 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3380                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3381         amdgpu_put_xgmi_hive(hive);
3382 }
3383
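/**
 * amdgpu_device_get_job_timeout_settings - parse the lockup timeout parameter
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the comma separated amdgpu_lockup_timeout string into the per ring
 * type timeouts in the order gfx, compute, sdma, video (values in ms).  A
 * value of 0 keeps the default for that ring type, a negative value disables
 * the timeout.  For example (illustrative only), "10000,60000" would set the
 * gfx and compute timeouts and leave sdma and video at their defaults.
 * Returns 0 on success, negative error code on failure.
 */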
3384 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3385 {
3386         char *input = amdgpu_lockup_timeout;
3387         char *timeout_setting = NULL;
3388         int index = 0;
3389         long timeout;
3390         int ret = 0;
3391
3392         /*
3393          * By default the timeout for non-compute jobs is 10000
3394          * and 60000 for compute jobs.
3395          * In SR-IOV or passthrough mode, the timeout for compute
3396          * jobs is 60000 by default.
3397          */
3398         adev->gfx_timeout = msecs_to_jiffies(10000);
3399         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3400         if (amdgpu_sriov_vf(adev))
3401                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3402                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3403         else
3404                 adev->compute_timeout =  msecs_to_jiffies(60000);
3405
3406         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3407                 while ((timeout_setting = strsep(&input, ",")) &&
3408                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3409                         ret = kstrtol(timeout_setting, 0, &timeout);
3410                         if (ret)
3411                                 return ret;
3412
3413                         if (timeout == 0) {
3414                                 index++;
3415                                 continue;
3416                         } else if (timeout < 0) {
3417                                 timeout = MAX_SCHEDULE_TIMEOUT;
3418                                 dev_warn(adev->dev, "lockup timeout disabled");
3419                                 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3420                         } else {
3421                                 timeout = msecs_to_jiffies(timeout);
3422                         }
3423
3424                         switch (index++) {
3425                         case 0:
3426                                 adev->gfx_timeout = timeout;
3427                                 break;
3428                         case 1:
3429                                 adev->compute_timeout = timeout;
3430                                 break;
3431                         case 2:
3432                                 adev->sdma_timeout = timeout;
3433                                 break;
3434                         case 3:
3435                                 adev->video_timeout = timeout;
3436                                 break;
3437                         default:
3438                                 break;
3439                         }
3440                 }
3441                 /*
3442                  * There is only one value specified and
3443                  * it should apply to all non-compute jobs.
3444                  */
3445                 if (index == 1) {
3446                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3447                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3448                                 adev->compute_timeout = adev->gfx_timeout;
3449                 }
3450         }
3451
3452         return ret;
3453 }
3454
3455 /**
3456  * amdgpu_device_check_iommu_direct_map - check if RAM is directly mapped to the GPU
3457  *
3458  * @adev: amdgpu_device pointer
3459  *
3460  * RAM is directly mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3461  */
3462 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3463 {
3464         struct iommu_domain *domain;
3465
3466         domain = iommu_get_domain_for_dev(adev->dev);
3467         if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3468                 adev->ram_is_direct_mapped = true;
3469 }
3470
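/*
 * sysfs device attributes; created in amdgpu_device_init() and removed in
 * amdgpu_device_fini_hw().
 */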
3471 static const struct attribute *amdgpu_dev_attributes[] = {
3472         &dev_attr_product_name.attr,
3473         &dev_attr_product_number.attr,
3474         &dev_attr_serial_number.attr,
3475         &dev_attr_pcie_replay_count.attr,
3476         NULL
3477 };
3478
3479 /**
3480  * amdgpu_device_init - initialize the driver
3481  *
3482  * @adev: amdgpu_device pointer
3483  * @flags: driver flags
3484  *
3485  * Initializes the driver info and hw (all asics).
3486  * Returns 0 for success or an error on failure.
3487  * Called at driver startup.
3488  */
3489 int amdgpu_device_init(struct amdgpu_device *adev,
3490                        uint32_t flags)
3491 {
3492         struct drm_device *ddev = adev_to_drm(adev);
3493         struct pci_dev *pdev = adev->pdev;
3494         int r, i;
3495         bool px = false;
3496         u32 max_MBps;
3497
3498         adev->shutdown = false;
3499         adev->flags = flags;
3500
3501         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3502                 adev->asic_type = amdgpu_force_asic_type;
3503         else
3504                 adev->asic_type = flags & AMD_ASIC_MASK;
3505
3506         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3507         if (amdgpu_emu_mode == 1)
3508                 adev->usec_timeout *= 10;
3509         adev->gmc.gart_size = 512 * 1024 * 1024;
3510         adev->accel_working = false;
3511         adev->num_rings = 0;
3512         adev->mman.buffer_funcs = NULL;
3513         adev->mman.buffer_funcs_ring = NULL;
3514         adev->vm_manager.vm_pte_funcs = NULL;
3515         adev->vm_manager.vm_pte_num_scheds = 0;
3516         adev->gmc.gmc_funcs = NULL;
3517         adev->harvest_ip_mask = 0x0;
3518         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3519         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3520
3521         adev->smc_rreg = &amdgpu_invalid_rreg;
3522         adev->smc_wreg = &amdgpu_invalid_wreg;
3523         adev->pcie_rreg = &amdgpu_invalid_rreg;
3524         adev->pcie_wreg = &amdgpu_invalid_wreg;
3525         adev->pciep_rreg = &amdgpu_invalid_rreg;
3526         adev->pciep_wreg = &amdgpu_invalid_wreg;
3527         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3528         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3529         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3530         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3531         adev->didt_rreg = &amdgpu_invalid_rreg;
3532         adev->didt_wreg = &amdgpu_invalid_wreg;
3533         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3534         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3535         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3536         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3537
3538         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3539                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3540                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3541
3542         /* mutex initializations are all done here so we
3543          * can re-call these functions without locking issues */
3544         mutex_init(&adev->firmware.mutex);
3545         mutex_init(&adev->pm.mutex);
3546         mutex_init(&adev->gfx.gpu_clock_mutex);
3547         mutex_init(&adev->srbm_mutex);
3548         mutex_init(&adev->gfx.pipe_reserve_mutex);
3549         mutex_init(&adev->gfx.gfx_off_mutex);
3550         mutex_init(&adev->grbm_idx_mutex);
3551         mutex_init(&adev->mn_lock);
3552         mutex_init(&adev->virt.vf_errors.lock);
3553         hash_init(adev->mn_hash);
3554         mutex_init(&adev->psp.mutex);
3555         mutex_init(&adev->notifier_lock);
3556         mutex_init(&adev->pm.stable_pstate_ctx_lock);
3557         mutex_init(&adev->benchmark_mutex);
3558
3559         amdgpu_device_init_apu_flags(adev);
3560
3561         r = amdgpu_device_check_arguments(adev);
3562         if (r)
3563                 return r;
3564
3565         spin_lock_init(&adev->mmio_idx_lock);
3566         spin_lock_init(&adev->smc_idx_lock);
3567         spin_lock_init(&adev->pcie_idx_lock);
3568         spin_lock_init(&adev->uvd_ctx_idx_lock);
3569         spin_lock_init(&adev->didt_idx_lock);
3570         spin_lock_init(&adev->gc_cac_idx_lock);
3571         spin_lock_init(&adev->se_cac_idx_lock);
3572         spin_lock_init(&adev->audio_endpt_idx_lock);
3573         spin_lock_init(&adev->mm_stats.lock);
3574
3575         INIT_LIST_HEAD(&adev->shadow_list);
3576         mutex_init(&adev->shadow_list_lock);
3577
3578         INIT_LIST_HEAD(&adev->reset_list);
3579
3580         INIT_LIST_HEAD(&adev->ras_list);
3581
3582         INIT_DELAYED_WORK(&adev->delayed_init_work,
3583                           amdgpu_device_delayed_init_work_handler);
3584         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3585                           amdgpu_device_delay_enable_gfx_off);
3586
3587         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3588
3589         adev->gfx.gfx_off_req_count = 1;
3590         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3591
3592         atomic_set(&adev->throttling_logging_enabled, 1);
3593         /*
3594          * If throttling continues, logging will be performed every minute
3595          * to avoid log flooding. "-1" is subtracted since the thermal
3596          * throttling interrupt comes every second. Thus, the total logging
3597          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3598          * for throttling interrupt) = 60 seconds.
3599          */
3600         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3601         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3602
3603         /* Registers mapping */
3604         /* TODO: block userspace mapping of io register */
3605         if (adev->asic_type >= CHIP_BONAIRE) {
3606                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3607                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3608         } else {
3609                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3610                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3611         }
3612
3613         for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3614                 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3615
3616         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3617         if (adev->rmmio == NULL) {
3618                 return -ENOMEM;
3619         }
3620         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3621         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3622
3623         amdgpu_device_get_pcie_info(adev);
3624
3625         if (amdgpu_mcbp)
3626                 DRM_INFO("MCBP is enabled\n");
3627
3628         /*
3629          * The reset domain needs to be present early, before the XGMI hive is
3630          * discovered (if any) and initialized, so the reset sem and in_gpu_reset
3631          * flag can be used early on during init and before calling RREG32.
3632          */
3633         adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3634         if (!adev->reset_domain)
3635                 return -ENOMEM;
3636
3637         /* detect hw virtualization here */
3638         amdgpu_detect_virtualization(adev);
3639
3640         r = amdgpu_device_get_job_timeout_settings(adev);
3641         if (r) {
3642                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3643                 return r;
3644         }
3645
3646         /* early init functions */
3647         r = amdgpu_device_ip_early_init(adev);
3648         if (r)
3649                 return r;
3650
3651         /* Enable TMZ based on IP_VERSION */
3652         amdgpu_gmc_tmz_set(adev);
3653
3654         amdgpu_gmc_noretry_set(adev);
3655         /* Need to get xgmi info early to decide the reset behavior */
3656         if (adev->gmc.xgmi.supported) {
3657                 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3658                 if (r)
3659                         return r;
3660         }
3661
3662         /* enable PCIE atomic ops */
3663         if (amdgpu_sriov_vf(adev))
3664                 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3665                         adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3666                         (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3667         else
3668                 adev->have_atomics_support =
3669                         !pci_enable_atomic_ops_to_root(adev->pdev,
3670                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3671                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3672         if (!adev->have_atomics_support)
3673                 dev_info(adev->dev, "PCIE atomic ops is not supported\n");
3674
3675         /* doorbell bar mapping and doorbell index init */
3676         amdgpu_device_doorbell_init(adev);
3677
3678         if (amdgpu_emu_mode == 1) {
3679                 /* post the asic on emulation mode */
3680                 emu_soc_asic_init(adev);
3681                 goto fence_driver_init;
3682         }
3683
3684         amdgpu_reset_init(adev);
3685
3686         /* detect if we are with an SRIOV vbios */
3687         amdgpu_device_detect_sriov_bios(adev);
3688
3689         /* check if we need to reset the asic
3690          *  E.g., driver was not cleanly unloaded previously, etc.
3691          */
3692         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3693                 if (adev->gmc.xgmi.num_physical_nodes) {
3694                         dev_info(adev->dev, "Pending hive reset.\n");
3695                         adev->gmc.xgmi.pending_reset = true;
3696                         /* Only need to init the blocks necessary for the SMU to handle the reset */
3697                         for (i = 0; i < adev->num_ip_blocks; i++) {
3698                                 if (!adev->ip_blocks[i].status.valid)
3699                                         continue;
3700                                 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3701                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3702                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3703                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3704                                         DRM_DEBUG("IP %s disabled for hw_init.\n",
3705                                                 adev->ip_blocks[i].version->funcs->name);
3706                                         adev->ip_blocks[i].status.hw = true;
3707                                 }
3708                         }
3709                 } else {
3710                         r = amdgpu_asic_reset(adev);
3711                         if (r) {
3712                                 dev_err(adev->dev, "asic reset on init failed\n");
3713                                 goto failed;
3714                         }
3715                 }
3716         }
3717
3718         pci_enable_pcie_error_reporting(adev->pdev);
3719
3720         /* Post card if necessary */
3721         if (amdgpu_device_need_post(adev)) {
3722                 if (!adev->bios) {
3723                         dev_err(adev->dev, "no vBIOS found\n");
3724                         r = -EINVAL;
3725                         goto failed;
3726                 }
3727                 DRM_INFO("GPU posting now...\n");
3728                 r = amdgpu_device_asic_init(adev);
3729                 if (r) {
3730                         dev_err(adev->dev, "gpu post error!\n");
3731                         goto failed;
3732                 }
3733         }
3734
3735         if (adev->is_atom_fw) {
3736                 /* Initialize clocks */
3737                 r = amdgpu_atomfirmware_get_clock_info(adev);
3738                 if (r) {
3739                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3740                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3741                         goto failed;
3742                 }
3743         } else {
3744                 /* Initialize clocks */
3745                 r = amdgpu_atombios_get_clock_info(adev);
3746                 if (r) {
3747                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3748                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3749                         goto failed;
3750                 }
3751                 /* init i2c buses */
3752                 if (!amdgpu_device_has_dc_support(adev))
3753                         amdgpu_atombios_i2c_init(adev);
3754         }
3755
3756 fence_driver_init:
3757         /* Fence driver */
3758         r = amdgpu_fence_driver_sw_init(adev);
3759         if (r) {
3760                 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3761                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3762                 goto failed;
3763         }
3764
3765         /* init the mode config */
3766         drm_mode_config_init(adev_to_drm(adev));
3767
3768         r = amdgpu_device_ip_init(adev);
3769         if (r) {
3770                 /* failed in exclusive mode due to timeout */
3771                 if (amdgpu_sriov_vf(adev) &&
3772                     !amdgpu_sriov_runtime(adev) &&
3773                     amdgpu_virt_mmio_blocked(adev) &&
3774                     !amdgpu_virt_wait_reset(adev)) {
3775                         dev_err(adev->dev, "VF exclusive mode timeout\n");
3776                         /* Don't send request since VF is inactive. */
3777                         adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3778                         adev->virt.ops = NULL;
3779                         r = -EAGAIN;
3780                         goto release_ras_con;
3781                 }
3782                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3783                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3784                 goto release_ras_con;
3785         }
3786
3787         amdgpu_fence_driver_hw_init(adev);
3788
3789         dev_info(adev->dev,
3790                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3791                         adev->gfx.config.max_shader_engines,
3792                         adev->gfx.config.max_sh_per_se,
3793                         adev->gfx.config.max_cu_per_sh,
3794                         adev->gfx.cu_info.number);
3795
3796         adev->accel_working = true;
3797
3798         amdgpu_vm_check_compute_bug(adev);
3799
3800         /* Initialize the buffer migration limit. */
3801         if (amdgpu_moverate >= 0)
3802                 max_MBps = amdgpu_moverate;
3803         else
3804                 max_MBps = 8; /* Allow 8 MB/s. */
3805         /* Get a log2 for easy divisions. */
3806         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3807
3808         r = amdgpu_pm_sysfs_init(adev);
3809         if (r) {
3810                 adev->pm_sysfs_en = false;
3811                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3812         } else
3813                 adev->pm_sysfs_en = true;
3814
3815         r = amdgpu_ucode_sysfs_init(adev);
3816         if (r) {
3817                 adev->ucode_sysfs_en = false;
3818                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3819         } else
3820                 adev->ucode_sysfs_en = true;
3821
3822         r = amdgpu_psp_sysfs_init(adev);
3823         if (r) {
3824                 adev->psp_sysfs_en = false;
3825                 if (!amdgpu_sriov_vf(adev))
3826                         DRM_ERROR("Creating psp sysfs failed\n");
3827         } else
3828                 adev->psp_sysfs_en = true;
3829
3830         /*
3831          * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3832          * Otherwise the mgpu fan boost feature will be skipped because the
3833          * gpu instance count would be too low.
3834          */
3835         amdgpu_register_gpu_instance(adev);
3836
3837         /* enable clockgating, etc. after ib tests since some blocks require
3838          * explicit gating rather than handling it automatically.
3839          */
3840         if (!adev->gmc.xgmi.pending_reset) {
3841                 r = amdgpu_device_ip_late_init(adev);
3842                 if (r) {
3843                         dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3844                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3845                         goto release_ras_con;
3846                 }
3847                 /* must succeed. */
3848                 amdgpu_ras_resume(adev);
3849                 queue_delayed_work(system_wq, &adev->delayed_init_work,
3850                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3851         }
3852
3853         if (amdgpu_sriov_vf(adev))
3854                 flush_delayed_work(&adev->delayed_init_work);
3855
3856         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3857         if (r)
3858                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3859
3860         if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3861                 r = amdgpu_pmu_init(adev);
3862                 if (r)
3863                         dev_err(adev->dev, "amdgpu_pmu_init failed\n");
        }
3864
3865         /* Keep the stored PCI config space at hand for restore after a sudden PCI error */
3866         if (amdgpu_device_cache_pci_state(adev->pdev))
3867                 pci_restore_state(pdev);
3868
3869         /* if we have more than one VGA card, then disable the amdgpu VGA resources */
3870         /* this will fail for cards that aren't VGA class devices, just
3871          * ignore it */
3872         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3873                 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3874
3875         if (amdgpu_device_supports_px(ddev)) {
3876                 px = true;
3877                 vga_switcheroo_register_client(adev->pdev,
3878                                                &amdgpu_switcheroo_ops, px);
3879                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3880         }
3881
3882         if (adev->gmc.xgmi.pending_reset)
3883                 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3884                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3885
3886         amdgpu_device_check_iommu_direct_map(adev);
3887
3888         return 0;
3889
3890 release_ras_con:
3891         amdgpu_release_ras_context(adev);
3892
3893 failed:
3894         amdgpu_vf_error_trans_all(adev);
3895
3896         return r;
3897 }
3898
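/*
 * amdgpu_device_unmap_mmio - tear down all CPU mappings of the device
 *
 * Invalidates userspace mappings, unmaps the doorbell and register BARs and
 * the VRAM aperture, and releases the VRAM MTRR/write-combine memtype.
 * Called from amdgpu_device_fini_hw() once the drm device is unplugged.
 */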
3899 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3900 {
3901
3902         /* Clear all CPU mappings pointing to this device */
3903         unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3904
3905         /* Unmap all mapped bars - Doorbell, registers and VRAM */
3906         amdgpu_device_doorbell_fini(adev);
3907
3908         iounmap(adev->rmmio);
3909         adev->rmmio = NULL;
3910         if (adev->mman.aper_base_kaddr)
3911                 iounmap(adev->mman.aper_base_kaddr);
3912         adev->mman.aper_base_kaddr = NULL;
3913
3914         /* Memory manager related */
3915         if (!adev->gmc.xgmi.connected_to_cpu) {
3916                 arch_phys_wc_del(adev->gmc.vram_mtrr);
3917                 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3918         }
3919 }
3920
3921 /**
3922  * amdgpu_device_fini_hw - tear down the driver
3923  *
3924  * @adev: amdgpu_device pointer
3925  *
3926  * Tear down the driver info (all asics).
3927  * Called at driver shutdown.
3928  */
3929 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3930 {
3931         dev_info(adev->dev, "amdgpu: finishing device.\n");
3932         flush_delayed_work(&adev->delayed_init_work);
3933         adev->shutdown = true;
3934
3935         /* make sure the IB tests have finished before entering exclusive mode
3936          * to avoid preemption during the IB tests
3937          */
3938         if (amdgpu_sriov_vf(adev)) {
3939                 amdgpu_virt_request_full_gpu(adev, false);
3940                 amdgpu_virt_fini_data_exchange(adev);
3941         }
3942
3943         /* disable all interrupts */
3944         amdgpu_irq_disable_all(adev);
3945         if (adev->mode_info.mode_config_initialized) {
3946                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3947                         drm_helper_force_disable_all(adev_to_drm(adev));
3948                 else
3949                         drm_atomic_helper_shutdown(adev_to_drm(adev));
3950         }
3951         amdgpu_fence_driver_hw_fini(adev);
3952
3953         if (adev->mman.initialized) {
3954                 flush_delayed_work(&adev->mman.bdev.wq);
3955                 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3956         }
3957
3958         if (adev->pm_sysfs_en)
3959                 amdgpu_pm_sysfs_fini(adev);
3960         if (adev->ucode_sysfs_en)
3961                 amdgpu_ucode_sysfs_fini(adev);
3962         if (adev->psp_sysfs_en)
3963                 amdgpu_psp_sysfs_fini(adev);
3964         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3965
3966         /* ras features must be disabled before hw fini */
3967         amdgpu_ras_pre_fini(adev);
3968
3969         amdgpu_device_ip_fini_early(adev);
3970
3971         amdgpu_irq_fini_hw(adev);
3972
3973         if (adev->mman.initialized)
3974                 ttm_device_clear_dma_mappings(&adev->mman.bdev);
3975
3976         amdgpu_gart_dummy_page_fini(adev);
3977
3978         if (drm_dev_is_unplugged(adev_to_drm(adev)))
3979                 amdgpu_device_unmap_mmio(adev);
3980
3981 }
3982
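/**
 * amdgpu_device_fini_sw - tear down the driver software state
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the remaining software state (IP blocks, firmware, i2c buses,
 * MMIO mappings, reset domain) after amdgpu_device_fini_hw().
 * Called at driver shutdown.
 */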
3983 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3984 {
3985         int idx;
3986
3987         amdgpu_fence_driver_sw_fini(adev);
3988         amdgpu_device_ip_fini(adev);
3989         release_firmware(adev->firmware.gpu_info_fw);
3990         adev->firmware.gpu_info_fw = NULL;
3991         adev->accel_working = false;
3992
3993         amdgpu_reset_fini(adev);
3994
3995         /* free i2c buses */
3996         if (!amdgpu_device_has_dc_support(adev))
3997                 amdgpu_i2c_fini(adev);
3998
3999         if (amdgpu_emu_mode != 1)
4000                 amdgpu_atombios_fini(adev);
4001
4002         kfree(adev->bios);
4003         adev->bios = NULL;
4004         if (amdgpu_device_supports_px(adev_to_drm(adev))) {
4005                 vga_switcheroo_unregister_client(adev->pdev);
4006                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4007         }
4008         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4009                 vga_client_unregister(adev->pdev);
4010
4011         if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4012
4013                 iounmap(adev->rmmio);
4014                 adev->rmmio = NULL;
4015                 amdgpu_device_doorbell_fini(adev);
4016                 drm_dev_exit(idx);
4017         }
4018
4019         if (IS_ENABLED(CONFIG_PERF_EVENTS))
4020                 amdgpu_pmu_fini(adev);
4021         if (adev->mman.discovery_bin)
4022                 amdgpu_discovery_fini(adev);
4023
4024         amdgpu_reset_put_reset_domain(adev->reset_domain);
4025         adev->reset_domain = NULL;
4026
4027         kfree(adev->pci_state);
4028
4029 }
4030
4031 /**
4032  * amdgpu_device_evict_resources - evict device resources
4033  * @adev: amdgpu device object
4034  *
4035  * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4036  * of the vram memory type. Mainly used for evicting device resources
4037  * at suspend time.
4038  *
4039  */
4040 static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
4041 {
4042         /* No need to evict vram on APUs for suspend to ram or s2idle */
4043         if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4044                 return;
4045
4046         if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
4047                 DRM_WARN("evicting device resources failed\n");
4048
4049 }
4050
4051 /*
4052  * Suspend & resume.
4053  */
4054 /**
4055  * amdgpu_device_suspend - initiate device suspend
4056  *
4057  * @dev: drm dev pointer
4058  * @fbcon : notify the fbdev of suspend
4059  *
4060  * Puts the hw in the suspend state (all asics).
4061  * Returns 0 for success or an error on failure.
4062  * Called at driver suspend.
4063  */
4064 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4065 {
4066         struct amdgpu_device *adev = drm_to_adev(dev);
4067
4068         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4069                 return 0;
4070
4071         adev->in_suspend = true;
4072
4073         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4074                 DRM_WARN("smart shift update failed\n");
4075
4076         drm_kms_helper_poll_disable(dev);
4077
4078         if (fbcon)
4079                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4080
4081         cancel_delayed_work_sync(&adev->delayed_init_work);
4082
4083         amdgpu_ras_suspend(adev);
4084
4085         amdgpu_device_ip_suspend_phase1(adev);
4086
4087         if (!adev->in_s0ix)
4088                 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4089
4090         amdgpu_device_evict_resources(adev);
4091
4092         amdgpu_fence_driver_hw_fini(adev);
4093
4094         amdgpu_device_ip_suspend_phase2(adev);
4095
4096         return 0;
4097 }
4098
4099 /**
4100  * amdgpu_device_resume - initiate device resume
4101  *
4102  * @dev: drm dev pointer
4103  * @fbcon : notify the fbdev of resume
4104  *
4105  * Bring the hw back to operating state (all asics).
4106  * Returns 0 for success or an error on failure.
4107  * Called at driver resume.
4108  */
4109 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4110 {
4111         struct amdgpu_device *adev = drm_to_adev(dev);
4112         int r = 0;
4113
4114         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4115                 return 0;
4116
4117         if (adev->in_s0ix)
4118                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4119
4120         /* post card */
4121         if (amdgpu_device_need_post(adev)) {
4122                 r = amdgpu_device_asic_init(adev);
4123                 if (r)
4124                         dev_err(adev->dev, "amdgpu asic init failed\n");
4125         }
4126
4127         r = amdgpu_device_ip_resume(adev);
4128         if (r) {
4129                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4130                 return r;
4131         }
4132         amdgpu_fence_driver_hw_init(adev);
4133
4134         r = amdgpu_device_ip_late_init(adev);
4135         if (r)
4136                 return r;
4137
4138         queue_delayed_work(system_wq, &adev->delayed_init_work,
4139                            msecs_to_jiffies(AMDGPU_RESUME_MS));
4140
4141         if (!adev->in_s0ix) {
4142                 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4143                 if (r)
4144                         return r;
4145         }
4146
4147         /* Make sure IB tests flushed */
4148         flush_delayed_work(&adev->delayed_init_work);
4149
4150         if (fbcon)
4151                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4152
4153         drm_kms_helper_poll_enable(dev);
4154
4155         amdgpu_ras_resume(adev);
4156
4157         /*
4158          * Most of the connector probing functions try to acquire runtime pm
4159          * refs to ensure that the GPU is powered on when connector polling is
4160          * performed. Since we're calling this from a runtime PM callback,
4161          * trying to acquire rpm refs will cause us to deadlock.
4162          *
4163          * Since we're guaranteed to be holding the rpm lock, it's safe to
4164          * temporarily disable the rpm helpers so this doesn't deadlock us.
4165          */
4166 #ifdef CONFIG_PM
4167         dev->dev->power.disable_depth++;
4168 #endif
4169         if (!amdgpu_device_has_dc_support(adev))
4170                 drm_helper_hpd_irq_event(dev);
4171         else
4172                 drm_kms_helper_hotplug_event(dev);
4173 #ifdef CONFIG_PM
4174         dev->dev->power.disable_depth--;
4175 #endif
4176         adev->in_suspend = false;
4177
4178         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4179                 DRM_WARN("smart shift update failed\n");
4180
4181         return 0;
4182 }
4183
4184 /**
4185  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4186  *
4187  * @adev: amdgpu_device pointer
4188  *
4189  * The list of all the hardware IPs that make up the asic is walked and
4190  * the check_soft_reset callbacks are run.  check_soft_reset determines
4191  * if the asic is still hung or not.
4192  * Returns true if any of the IPs are still in a hung state, false if not.
4193  */
4194 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4195 {
4196         int i;
4197         bool asic_hang = false;
4198
4199         if (amdgpu_sriov_vf(adev))
4200                 return true;
4201
4202         if (amdgpu_asic_need_full_reset(adev))
4203                 return true;
4204
4205         for (i = 0; i < adev->num_ip_blocks; i++) {
4206                 if (!adev->ip_blocks[i].status.valid)
4207                         continue;
4208                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4209                         adev->ip_blocks[i].status.hang =
4210                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4211                 if (adev->ip_blocks[i].status.hang) {
4212                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4213                         asic_hang = true;
4214                 }
4215         }
4216         return asic_hang;
4217 }
4218
4219 /**
4220  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4221  *
4222  * @adev: amdgpu_device pointer
4223  *
4224  * The list of all the hardware IPs that make up the asic is walked and the
4225  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4226  * handles any IP specific hardware or software state changes that are
4227  * necessary for a soft reset to succeed.
4228  * Returns 0 on success, negative error code on failure.
4229  */
4230 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4231 {
4232         int i, r = 0;
4233
4234         for (i = 0; i < adev->num_ip_blocks; i++) {
4235                 if (!adev->ip_blocks[i].status.valid)
4236                         continue;
4237                 if (adev->ip_blocks[i].status.hang &&
4238                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4239                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4240                         if (r)
4241                                 return r;
4242                 }
4243         }
4244
4245         return 0;
4246 }
4247
4248 /**
4249  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4250  *
4251  * @adev: amdgpu_device pointer
4252  *
4253  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4254  * reset is necessary to recover.
4255  * Returns true if a full asic reset is required, false if not.
4256  */
4257 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4258 {
4259         int i;
4260
4261         if (amdgpu_asic_need_full_reset(adev))
4262                 return true;
4263
4264         for (i = 0; i < adev->num_ip_blocks; i++) {
4265                 if (!adev->ip_blocks[i].status.valid)
4266                         continue;
4267                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4268                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4269                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4270                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4271                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4272                         if (adev->ip_blocks[i].status.hang) {
4273                                 dev_info(adev->dev, "Some block need full reset!\n");
4274                                 return true;
4275                         }
4276                 }
4277         }
4278         return false;
4279 }
4280
4281 /**
4282  * amdgpu_device_ip_soft_reset - do a soft reset
4283  *
4284  * @adev: amdgpu_device pointer
4285  *
4286  * The list of all the hardware IPs that make up the asic is walked and the
4287  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4288  * IP specific hardware or software state changes that are necessary to soft
4289  * reset the IP.
4290  * Returns 0 on success, negative error code on failure.
4291  */
4292 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4293 {
4294         int i, r = 0;
4295
4296         for (i = 0; i < adev->num_ip_blocks; i++) {
4297                 if (!adev->ip_blocks[i].status.valid)
4298                         continue;
4299                 if (adev->ip_blocks[i].status.hang &&
4300                     adev->ip_blocks[i].version->funcs->soft_reset) {
4301                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4302                         if (r)
4303                                 return r;
4304                 }
4305         }
4306
4307         return 0;
4308 }
4309
4310 /**
4311  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4312  *
4313  * @adev: amdgpu_device pointer
4314  *
4315  * The list of all the hardware IPs that make up the asic is walked and the
4316  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4317  * handles any IP specific hardware or software state changes that are
4318  * necessary after the IP has been soft reset.
4319  * Returns 0 on success, negative error code on failure.
4320  */
4321 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4322 {
4323         int i, r = 0;
4324
4325         for (i = 0; i < adev->num_ip_blocks; i++) {
4326                 if (!adev->ip_blocks[i].status.valid)
4327                         continue;
4328                 if (adev->ip_blocks[i].status.hang &&
4329                     adev->ip_blocks[i].version->funcs->post_soft_reset)
4330                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4331                 if (r)
4332                         return r;
4333         }
4334
4335         return 0;
4336 }
4337
4338 /**
4339  * amdgpu_device_recover_vram - Recover some VRAM contents
4340  *
4341  * @adev: amdgpu_device pointer
4342  *
4343  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4344  * restore things like GPUVM page tables after a GPU reset where
4345  * the contents of VRAM might be lost.
4346  *
4347  * Returns:
4348  * 0 on success, negative error code on failure.
4349  */
4350 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4351 {
4352         struct dma_fence *fence = NULL, *next = NULL;
4353         struct amdgpu_bo *shadow;
4354         struct amdgpu_bo_vm *vmbo;
4355         long r = 1, tmo;
4356
4357         if (amdgpu_sriov_runtime(adev))
4358                 tmo = msecs_to_jiffies(8000);
4359         else
4360                 tmo = msecs_to_jiffies(100);
4361
4362         dev_info(adev->dev, "recover vram bo from shadow start\n");
4363         mutex_lock(&adev->shadow_list_lock);
4364         list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4365                 shadow = &vmbo->bo;
4366                 /* No need to recover an evicted BO */
4367                 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4368                     shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4369                     shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4370                         continue;
4371
4372                 r = amdgpu_bo_restore_shadow(shadow, &next);
4373                 if (r)
4374                         break;
4375
4376                 if (fence) {
4377                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4378                         dma_fence_put(fence);
4379                         fence = next;
4380                         if (tmo == 0) {
4381                                 r = -ETIMEDOUT;
4382                                 break;
4383                         } else if (tmo < 0) {
4384                                 r = tmo;
4385                                 break;
4386                         }
4387                 } else {
4388                         fence = next;
4389                 }
4390         }
4391         mutex_unlock(&adev->shadow_list_lock);
4392
4393         if (fence)
4394                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4395         dma_fence_put(fence);
4396
4397         if (r < 0 || tmo <= 0) {
4398                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4399                 return -EIO;
4400         }
4401
4402         dev_info(adev->dev, "recover vram bo from shadow done\n");
4403         return 0;
4404 }
4405
4406
4407 /**
4408  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4409  *
4410  * @adev: amdgpu_device pointer
4411  * @from_hypervisor: request from hypervisor
4412  *
4413  * Do a VF FLR (function level reset) and reinitialize the ASIC.
4414  * Returns 0 on success, negative error code on failure.
4415  */
4416 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4417                                      bool from_hypervisor)
4418 {
4419         int r;
4420         struct amdgpu_hive_info *hive = NULL;
4421         int retry_limit = 0;
4422
4423 retry:
4424         amdgpu_amdkfd_pre_reset(adev);
4425
4426         if (from_hypervisor)
4427                 r = amdgpu_virt_request_full_gpu(adev, true);
4428         else
4429                 r = amdgpu_virt_reset_gpu(adev);
4430         if (r)
4431                 return r;
4432
4433         /* Resume IP prior to SMC */
4434         r = amdgpu_device_ip_reinit_early_sriov(adev);
4435         if (r)
4436                 goto error;
4437
4438         amdgpu_virt_init_data_exchange(adev);
4439
4440         r = amdgpu_device_fw_loading(adev);
4441         if (r)
4442                 goto error;
4443
4444         /* now we are okay to resume SMC/CP/SDMA */
4445         r = amdgpu_device_ip_reinit_late_sriov(adev);
4446         if (r)
4447                 goto error;
4448
4449         hive = amdgpu_get_xgmi_hive(adev);
4450         /* Update PSP FW topology after reset */
4451         if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4452                 r = amdgpu_xgmi_update_topology(hive, adev);
4453
4454         if (hive)
4455                 amdgpu_put_xgmi_hive(hive);
4456
4457         if (!r) {
4458                 amdgpu_irq_gpu_reset_resume_helper(adev);
4459                 r = amdgpu_ib_ring_tests(adev);
4460
4461                 amdgpu_amdkfd_post_reset(adev);
4462         }
4463
4464 error:
4465         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4466                 amdgpu_inc_vram_lost(adev);
4467                 r = amdgpu_device_recover_vram(adev);
4468         }
4469         amdgpu_virt_release_full_gpu(adev, true);
4470
4471         if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4472                 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4473                         retry_limit++;
4474                         goto retry;
4475                 } else
4476                         DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4477         }
4478
4479         return r;
4480 }
4481
4482 /**
4483  * amdgpu_device_has_job_running - check if there is any job in the pending list
4484  *
4485  * @adev: amdgpu_device pointer
4486  *
4487  * Check whether any ring still has a job in its scheduler pending list.
4488  */
4489 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4490 {
4491         int i;
4492         struct drm_sched_job *job;
4493
4494         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4495                 struct amdgpu_ring *ring = adev->rings[i];
4496
4497                 if (!ring || !ring->sched.thread)
4498                         continue;
4499
4500                 spin_lock(&ring->sched.job_list_lock);
4501                 job = list_first_entry_or_null(&ring->sched.pending_list,
4502                                                struct drm_sched_job, list);
4503                 spin_unlock(&ring->sched.job_list_lock);
4504                 if (job)
4505                         return true;
4506         }
4507         return false;
4508 }
4509
4510 /**
4511  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4512  *
4513  * @adev: amdgpu_device pointer
4514  *
4515  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4516  * a hung GPU.
4517  */
4518 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4519 {
4520         if (!amdgpu_device_ip_check_soft_reset(adev)) {
4521                 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4522                 return false;
4523         }
4524
4525         if (amdgpu_gpu_recovery == 0)
4526                 goto disabled;
4527
4528         if (amdgpu_sriov_vf(adev))
4529                 return true;
4530
4531         if (amdgpu_gpu_recovery == -1) {
4532                 switch (adev->asic_type) {
4533 #ifdef CONFIG_DRM_AMDGPU_SI
4534                 case CHIP_VERDE:
4535                 case CHIP_TAHITI:
4536                 case CHIP_PITCAIRN:
4537                 case CHIP_OLAND:
4538                 case CHIP_HAINAN:
4539 #endif
4540 #ifdef CONFIG_DRM_AMDGPU_CIK
4541                 case CHIP_KAVERI:
4542                 case CHIP_KABINI:
4543                 case CHIP_MULLINS:
4544 #endif
4545                 case CHIP_CARRIZO:
4546                 case CHIP_STONEY:
4547                 case CHIP_CYAN_SKILLFISH:
4548                         goto disabled;
4549                 default:
4550                         break;
4551                 }
4552         }
4553
4554         return true;
4555
4556 disabled:
4557                 dev_info(adev->dev, "GPU recovery disabled.\n");
4558                 return false;
4559 }
4560
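/**
 * amdgpu_device_mode1_reset - perform a full ASIC (mode1) reset
 *
 * @adev: amdgpu_device pointer
 *
 * Disables bus mastering, caches the PCI config space, triggers a mode1
 * reset through the SMU if supported (through the PSP otherwise), restores
 * the PCI config space and waits for the ASIC to report a valid memory
 * size again.
 * Returns 0 on success, negative error code on failure.
 */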
4561 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4562 {
4563         u32 i;
4564         int ret = 0;
4565
4566         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4567
4568         dev_info(adev->dev, "GPU mode1 reset\n");
4569
4570         /* disable BM */
4571         pci_clear_master(adev->pdev);
4572
4573         amdgpu_device_cache_pci_state(adev->pdev);
4574
4575         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4576                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4577                 ret = amdgpu_dpm_mode1_reset(adev);
4578         } else {
4579                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4580                 ret = psp_gpu_reset(adev);
4581         }
4582
4583         if (ret)
4584                 dev_err(adev->dev, "GPU mode1 reset failed\n");
4585
4586         amdgpu_device_load_pci_state(adev->pdev);
4587
4588         /* wait for asic to come out of reset */
4589         for (i = 0; i < adev->usec_timeout; i++) {
4590                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4591
4592                 if (memsize != 0xffffffff)
4593                         break;
4594                 udelay(1);
4595         }
4596
4597         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4598         return ret;
4599 }
4600
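/**
 * amdgpu_device_pre_asic_reset - prepare a device for ASIC reset
 *
 * @adev: amdgpu_device pointer
 * @reset_context: amdgpu reset context pointer
 *
 * Stops SR-IOV data exchange if needed, clears and force-completes the
 * hardware fences of all rings, bumps the karma of the offending job (if
 * any), and runs the reset handler's prepare step if one is implemented.
 * Otherwise a soft reset is attempted first and the IP blocks are suspended
 * when a full reset turns out to be necessary.
 * Returns 0 on success, negative error code on failure.
 */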
4601 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4602                                  struct amdgpu_reset_context *reset_context)
4603 {
4604         int i, r = 0;
4605         struct amdgpu_job *job = NULL;
4606         bool need_full_reset =
4607                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4608
4609         if (reset_context->reset_req_dev == adev)
4610                 job = reset_context->job;
4611
4612         if (amdgpu_sriov_vf(adev)) {
4613                 /* stop the data exchange thread */
4614                 amdgpu_virt_fini_data_exchange(adev);
4615         }
4616
4617         amdgpu_fence_driver_isr_toggle(adev, true);
4618
4619         /* block all schedulers and reset given job's ring */
4620         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4621                 struct amdgpu_ring *ring = adev->rings[i];
4622
4623                 if (!ring || !ring->sched.thread)
4624                         continue;
4625
4626                 /* Clear the job fences from the fence driver to avoid force_completion;
4627                  * this leaves only the NULL and vm flush fences in the fence driver. */
4628                 amdgpu_fence_driver_clear_job_fences(ring);
4629
4630                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4631                 amdgpu_fence_driver_force_completion(ring);
4632         }
4633
4634         amdgpu_fence_driver_isr_toggle(adev, false);
4635
4636         if (job && job->vm)
4637                 drm_sched_increase_karma(&job->base);
4638
4639         r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4640         /* If reset handler not implemented, continue; otherwise return */
4641         if (r == -ENOSYS)
4642                 r = 0;
4643         else
4644                 return r;
4645
4646         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4647         if (!amdgpu_sriov_vf(adev)) {
4648
4649                 if (!need_full_reset)
4650                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4651
4652                 if (!need_full_reset) {
4653                         amdgpu_device_ip_pre_soft_reset(adev);
4654                         r = amdgpu_device_ip_soft_reset(adev);
4655                         amdgpu_device_ip_post_soft_reset(adev);
4656                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4657                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4658                                 need_full_reset = true;
4659                         }
4660                 }
4661
4662                 if (need_full_reset)
4663                         r = amdgpu_device_ip_suspend(adev);
4664                 if (need_full_reset)
4665                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4666                 else
4667                         clear_bit(AMDGPU_NEED_FULL_RESET,
4668                                   &reset_context->flags);
4669         }
4670
4671         return r;
4672 }
4673
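/*
 * Snapshot the registers on the configured reset dump list so they can be
 * reported later (e.g. in the devcoredump).  Must be called with the reset
 * domain semaphore held.
 */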
4674 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4675 {
4676         int i;
4677
4678         lockdep_assert_held(&adev->reset_domain->sem);
4679
4680         for (i = 0; i < adev->num_regs; i++) {
4681                 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4682                 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4683                                              adev->reset_dump_reg_value[i]);
4684         }
4685
4686         return 0;
4687 }
4688
4689 #ifdef CONFIG_DEV_COREDUMP
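/*
 * dev_coredumpm() read callback: formats the reset time, the offending
 * process, whether VRAM was lost and the captured register dump into the
 * devcoredump buffer exposed to userspace.
 */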
4690 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4691                 size_t count, void *data, size_t datalen)
4692 {
4693         struct drm_printer p;
4694         struct amdgpu_device *adev = data;
4695         struct drm_print_iterator iter;
4696         int i;
4697
4698         iter.data = buffer;
4699         iter.offset = 0;
4700         iter.start = offset;
4701         iter.remain = count;
4702
4703         p = drm_coredump_printer(&iter);
4704
4705         drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4706         drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4707         drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4708         drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4709         if (adev->reset_task_info.pid)
4710                 drm_printf(&p, "process_name: %s PID: %d\n",
4711                            adev->reset_task_info.process_name,
4712                            adev->reset_task_info.pid);
4713
4714         if (adev->reset_vram_lost)
4715                 drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4716         if (adev->num_regs) {
4717                 drm_printf(&p, "AMDGPU register dumps:\nOffset:     Value:\n");
4718
4719                 for (i = 0; i < adev->num_regs; i++)
4720                         drm_printf(&p, "0x%08x: 0x%08x\n",
4721                                    adev->reset_dump_reg_list[i],
4722                                    adev->reset_dump_reg_value[i]);
4723         }
4724
4725         return count - iter.remain;
4726 }
4727
4728 static void amdgpu_devcoredump_free(void *data)
4729 {
4730 }
4731
4732 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4733 {
4734         struct drm_device *dev = adev_to_drm(adev);
4735
4736         ktime_get_ts64(&adev->reset_time);
4737         dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
4738                       amdgpu_devcoredump_read, amdgpu_devcoredump_free);
4739 }
4740 #endif
4741
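/**
 * amdgpu_do_asic_reset - reset and re-initialize every device in the list
 *
 * @device_list_handle: list of devices to reset (a whole XGMI hive or a
 *                      single GPU)
 * @reset_context: amdgpu reset context pointer
 *
 * Tries the ASIC specific reset handler first.  If none is implemented,
 * falls back to the default flow: perform the HW reset on all nodes (in
 * parallel for XGMI), then, for a full reset, re-post the card, resume the
 * IP blocks, reload firmware, recover VRAM contents from their shadows and
 * run the IB ring tests.
 * Returns 0 on success, -EAGAIN if a retry with a full reset is needed,
 * other negative error codes on failure.
 */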
4742 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4743                          struct amdgpu_reset_context *reset_context)
4744 {
4745         struct amdgpu_device *tmp_adev = NULL;
4746         bool need_full_reset, skip_hw_reset, vram_lost = false;
4747         int r = 0;
4748
4749         /* Try reset handler method first */
4750         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4751                                     reset_list);
4752         amdgpu_reset_reg_dumps(tmp_adev);
4753
4754         reset_context->reset_device_list = device_list_handle;
4755         r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4756         /* If reset handler not implemented, continue; otherwise return */
4757         if (r == -ENOSYS)
4758                 r = 0;
4759         else
4760                 return r;
4761
4762         /* Reset handler not implemented, use the default method */
4763         need_full_reset =
4764                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4765         skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4766
4767         /*
4768          * ASIC reset has to be done on all XGMI hive nodes ASAP
4769          * to allow proper links negotiation in FW (within 1 sec)
4770          */
4771         if (!skip_hw_reset && need_full_reset) {
4772                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4773                         /* For XGMI run all resets in parallel to speed up the process */
4774                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4775                                 tmp_adev->gmc.xgmi.pending_reset = false;
4776                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4777                                         r = -EALREADY;
4778                         } else
4779                                 r = amdgpu_asic_reset(tmp_adev);
4780
4781                         if (r) {
4782                                 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4783                                          r, adev_to_drm(tmp_adev)->unique);
4784                                 break;
4785                         }
4786                 }
4787
4788                 /* For XGMI wait for all resets to complete before proceeding */
4789                 if (!r) {
4790                         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4791                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4792                                         flush_work(&tmp_adev->xgmi_reset_work);
4793                                         r = tmp_adev->asic_reset_res;
4794                                         if (r)
4795                                                 break;
4796                                 }
4797                         }
4798                 }
4799         }
4800
4801         if (!r && amdgpu_ras_intr_triggered()) {
4802                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4803                         if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4804                             tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4805                                 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4806                 }
4807
4808                 amdgpu_ras_intr_cleared();
4809         }
4810
4811         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4812                 if (need_full_reset) {
4813                         /* post card */
4814                         r = amdgpu_device_asic_init(tmp_adev);
4815                         if (r) {
4816                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
4817                         } else {
4818                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4819                                 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4820                                 if (r)
4821                                         goto out;
4822
4823                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4824                                 if (r)
4825                                         goto out;
4826
4827                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4828 #ifdef CONFIG_DEV_COREDUMP
4829                                 tmp_adev->reset_vram_lost = vram_lost;
4830                                 memset(&tmp_adev->reset_task_info, 0,
4831                                                 sizeof(tmp_adev->reset_task_info));
4832                                 if (reset_context->job && reset_context->job->vm)
4833                                         tmp_adev->reset_task_info =
4834                                                 reset_context->job->vm->task_info;
4835                                 amdgpu_reset_capture_coredumpm(tmp_adev);
4836 #endif
4837                                 if (vram_lost) {
4838                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
4839                                         amdgpu_inc_vram_lost(tmp_adev);
4840                                 }
4841
4842                                 r = amdgpu_device_fw_loading(tmp_adev);
4843                                 if (r)
4844                                         return r;
4845
4846                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4847                                 if (r)
4848                                         goto out;
4849
4850                                 if (vram_lost)
4851                                         amdgpu_device_fill_reset_magic(tmp_adev);
4852
4853                                 /*
4854                                  * Add this ASIC back as tracked, as the reset has
4855                                  * already completed successfully.
4856                                  */
4857                                 amdgpu_register_gpu_instance(tmp_adev);
4858
4859                                 if (!reset_context->hive &&
4860                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4861                                         amdgpu_xgmi_add_device(tmp_adev);
4862
4863                                 r = amdgpu_device_ip_late_init(tmp_adev);
4864                                 if (r)
4865                                         goto out;
4866
4867                                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4868
4869                                 /*
4870                                  * The GPU enters a bad state once the number of
4871                                  * faulty pages flagged by ECC reaches the threshold,
4872                                  * and RAS recovery is scheduled next. So add a check
4873                                  * here to break recovery if the bad page threshold
4874                                  * has indeed been exceeded, and remind the user to
4875                                  * either retire this GPU or set a bigger
4876                                  * bad_page_threshold value the next time the driver
4877                                  * is probed.
4878                                  */
4879                                 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4880                                         /* must succeed. */
4881                                         amdgpu_ras_resume(tmp_adev);
4882                                 } else {
4883                                         r = -EINVAL;
4884                                         goto out;
4885                                 }
4886
4887                                 /* Update PSP FW topology after reset */
4888                                 if (reset_context->hive &&
4889                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4890                                         r = amdgpu_xgmi_update_topology(
4891                                                 reset_context->hive, tmp_adev);
4892                         }
4893                 }
4894
4895 out:
4896                 if (!r) {
4897                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4898                         r = amdgpu_ib_ring_tests(tmp_adev);
4899                         if (r) {
4900                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4901                                 need_full_reset = true;
4902                                 r = -EAGAIN;
4903                                 goto end;
4904                         }
4905                 }
4906
4907                 if (!r)
4908                         r = amdgpu_device_recover_vram(tmp_adev);
4909                 else
4910                         tmp_adev->asic_reset_res = r;
4911         }
4912
4913 end:
4914         if (need_full_reset)
4915                 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4916         else
4917                 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4918         return r;
4919 }
4920
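/*
 * Record in adev->mp1_state what the SMU (MP1) should expect from the
 * upcoming reset: shutdown for a mode1 reset, reset for a mode2 reset,
 * nothing special otherwise.
 */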
4921 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
4922 {
4923
4924         switch (amdgpu_asic_reset_method(adev)) {
4925         case AMD_RESET_METHOD_MODE1:
4926                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4927                 break;
4928         case AMD_RESET_METHOD_MODE2:
4929                 adev->mp1_state = PP_MP1_STATE_RESET;
4930                 break;
4931         default:
4932                 adev->mp1_state = PP_MP1_STATE_NONE;
4933                 break;
4934         }
4935 }
4936
4937 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
4938 {
4939         amdgpu_vf_error_trans_all(adev);
4940         adev->mp1_state = PP_MP1_STATE_NONE;
4941 }
4942
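/*
 * Re-enable runtime PM on the GPU's HDMI/DP audio function (PCI function 1
 * on the same bus) and resume it once the reset has completed.
 */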
4943 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4944 {
4945         struct pci_dev *p = NULL;
4946
4947         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4948                         adev->pdev->bus->number, 1);
4949         if (p) {
4950                 pm_runtime_enable(&(p->dev));
4951                 pm_runtime_resume(&(p->dev));
4952         }
4953 }
4954
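/*
 * Put the GPU's HDMI/DP audio function into runtime suspend before a BACO
 * or mode1 reset so the audio driver does not touch the hardware while it
 * is being reset; waits at most the autosuspend delay (or 4 seconds).
 */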
4955 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4956 {
4957         enum amd_reset_method reset_method;
4958         struct pci_dev *p = NULL;
4959         u64 expires;
4960
4961         /*
4962          * For now, only BACO and mode1 reset are confirmed to suffer
4963          * from the audio issue if the audio device is not properly suspended.
4964          */
4965         reset_method = amdgpu_asic_reset_method(adev);
4966         if ((reset_method != AMD_RESET_METHOD_BACO) &&
4967              (reset_method != AMD_RESET_METHOD_MODE1))
4968                 return -EINVAL;
4969
4970         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4971                         adev->pdev->bus->number, 1);
4972         if (!p)
4973                 return -ENODEV;
4974
4975         expires = pm_runtime_autosuspend_expiration(&(p->dev));
4976         if (!expires)
4977                 /*
4978                  * If we cannot get the audio device autosuspend delay,
4979                  * a fixed 4s interval will be used. Since 3s is the
4980                  * audio controller's default autosuspend delay setting,
4981                  * the 4s used here is guaranteed to cover that.
4982                  */
4983                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4984
4985         while (!pm_runtime_status_suspended(&(p->dev))) {
4986                 if (!pm_runtime_suspend(&(p->dev)))
4987                         break;
4988
4989                 if (expires < ktime_get_mono_fast_ns()) {
4990                         dev_warn(adev->dev, "failed to suspend display audio\n");
4991                         /* TODO: abort the succeeding gpu reset? */
4992                         return -ETIMEDOUT;
4993                 }
4994         }
4995
4996         pm_runtime_disable(&(p->dev));
4997
4998         return 0;
4999 }
5000
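/*
 * Resubmit the first pending job of every ring one at a time and wait for
 * its hardware fence.  A job that still times out after the reset is the
 * real guilty one: its karma is increased, its fence is cleared and another
 * HW reset is performed.  Jobs that do complete are signaled and freed.
 */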
5001 static void amdgpu_device_recheck_guilty_jobs(
5002         struct amdgpu_device *adev, struct list_head *device_list_handle,
5003         struct amdgpu_reset_context *reset_context)
5004 {
5005         int i, r = 0;
5006
5007         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5008                 struct amdgpu_ring *ring = adev->rings[i];
5009                 int ret = 0;
5010                 struct drm_sched_job *s_job;
5011
5012                 if (!ring || !ring->sched.thread)
5013                         continue;
5014
5015                 s_job = list_first_entry_or_null(&ring->sched.pending_list,
5016                                 struct drm_sched_job, list);
5017                 if (s_job == NULL)
5018                         continue;
5019
5020                 /* clear the job's guilty flag and rely on the following step to decide the real one */
5021                 drm_sched_reset_karma(s_job);
5022                 drm_sched_resubmit_jobs_ext(&ring->sched, 1);
5023
5024                 if (!s_job->s_fence->parent) {
5025                         DRM_WARN("Failed to get a HW fence for job!");
5026                         continue;
5027                 }
5028
5029                 ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
5030                 if (ret == 0) { /* timeout */
5031                         DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
5032                                                 ring->sched.name, s_job->id);
5033
5034
5035                         amdgpu_fence_driver_isr_toggle(adev, true);
5036
5037                         /* Clear this failed job from fence array */
5038                         amdgpu_fence_driver_clear_job_fences(ring);
5039
5040                         amdgpu_fence_driver_isr_toggle(adev, false);
5041
5042                         /* Since the job won't signal and we go for
5043                          * another resubmit, drop this parent pointer
5044                          */
5045                         dma_fence_put(s_job->s_fence->parent);
5046                         s_job->s_fence->parent = NULL;
5047
5048                         /* set guilty */
5049                         drm_sched_increase_karma(s_job);
5050 retry:
5051                         /* do hw reset */
5052                         if (amdgpu_sriov_vf(adev)) {
5053                                 amdgpu_virt_fini_data_exchange(adev);
5054                                 r = amdgpu_device_reset_sriov(adev, false);
5055                                 if (r)
5056                                         adev->asic_reset_res = r;
5057                         } else {
5058                                 clear_bit(AMDGPU_SKIP_HW_RESET,
5059                                           &reset_context->flags);
5060                                 r = amdgpu_do_asic_reset(device_list_handle,
5061                                                          reset_context);
5062                                 if (r && r == -EAGAIN)
5063                                         goto retry;
5064                         }
5065
5066                         /*
5067                          * add reset counter so that the following
5068                          * resubmitted job could flush vmid
5069                          */
5070                         atomic_inc(&adev->gpu_reset_counter);
5071                         continue;
5072                 }
5073
5074                 /* got the hw fence, signal finished fence */
5075                 atomic_dec(ring->sched.score);
5076                 dma_fence_get(&s_job->s_fence->finished);
5077                 dma_fence_signal(&s_job->s_fence->finished);
5078                 dma_fence_put(&s_job->s_fence->finished);
5079
5080                 /* remove node from list and free the job */
5081                 spin_lock(&ring->sched.job_list_lock);
5082                 list_del_init(&s_job->list);
5083                 spin_unlock(&ring->sched.job_list_lock);
5084                 ring->sched.ops->free_job(s_job);
5085         }
5086 }
5087
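/*
 * Cancel GPU reset work that may have been queued from other paths
 * (debugfs trigger, KFD, SR-IOV FLR handling, RAS recovery) so it does
 * not race with the reset that is already in progress.
 */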
5088 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5089 {
5090         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5091
5092 #if defined(CONFIG_DEBUG_FS)
5093         if (!amdgpu_sriov_vf(adev))
5094                 cancel_work(&adev->reset_work);
5095 #endif
5096
5097         if (adev->kfd.dev)
5098                 cancel_work(&adev->kfd.reset_work);
5099
5100         if (amdgpu_sriov_vf(adev))
5101                 cancel_work(&adev->virt.flr_work);
5102
5103         if (con && adev->ras_enabled)
5104                 cancel_work(&con->recovery_work);
5105
5106 }
5107
5108
5109 /**
5110  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5111  *
5112  * @adev: amdgpu_device pointer
5113  * @job: which job triggered the hang
5114  * @reset_context: amdgpu reset context pointer
5115  *
5116  * Attempt to reset the GPU if it has hung (all asics): do a soft or full reset
5117  * and reinitialize the ASIC.  Returns 0 for success or an error on failure.
5118  */
5119
5120 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5121                               struct amdgpu_job *job,
5122                               struct amdgpu_reset_context *reset_context)
5123 {
5124         struct list_head device_list, *device_list_handle =  NULL;
5125         bool job_signaled = false;
5126         struct amdgpu_hive_info *hive = NULL;
5127         struct amdgpu_device *tmp_adev = NULL;
5128         int i, r = 0;
5129         bool need_emergency_restart = false;
5130         bool audio_suspended = false;
5131         int tmp_vram_lost_counter;
5132
5133         /*
5134          * Special case: RAS triggered and full reset isn't supported
5135          */
5136         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5137
5138         /*
5139          * Flush RAM to disk so that after reboot
5140          * the user can read log and see why the system rebooted.
5141          */
5142         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5143                 DRM_WARN("Emergency reboot.");
5144
5145                 ksys_sync_helper();
5146                 emergency_restart();
5147         }
5148
5149         dev_info(adev->dev, "GPU %s begin!\n",
5150                 need_emergency_restart ? "jobs stop":"reset");
5151
5152         if (!amdgpu_sriov_vf(adev))
5153                 hive = amdgpu_get_xgmi_hive(adev);
5154         if (hive)
5155                 mutex_lock(&hive->hive_lock);
5156
5157         reset_context->job = job;
5158         reset_context->hive = hive;
5159         /*
5160          * Build list of devices to reset.
5161          * In case we are in XGMI hive mode, resort the device list
5162          * to put adev in the 1st position.
5163          */
5164         INIT_LIST_HEAD(&device_list);
5165         if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5166                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
5167                         list_add_tail(&tmp_adev->reset_list, &device_list);
5168                 if (!list_is_first(&adev->reset_list, &device_list))
5169                         list_rotate_to_front(&adev->reset_list, &device_list);
5170                 device_list_handle = &device_list;
5171         } else {
5172                 list_add_tail(&adev->reset_list, &device_list);
5173                 device_list_handle = &device_list;
5174         }
5175
5176         /* We need to lock reset domain only once both for XGMI and single device */
5177         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5178                                     reset_list);
5179         amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5180
5181         /* block all schedulers and reset given job's ring */
5182         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5183
5184                 amdgpu_device_set_mp1_state(tmp_adev);
5185
5186                 /*
5187                  * Try to put the audio codec into suspend state
5188                  * before the gpu reset is started.
5189                  *
5190                  * This is because the power domain of the graphics
5191                  * device is shared with the AZ (audio) power domain.
5192                  * Without this, we may change the audio hardware
5193                  * behind the audio driver's back, which will trigger
5194                  * some audio codec errors.
5195                  */
5196                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5197                         audio_suspended = true;
5198
5199                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5200
5201                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5202
5203                 if (!amdgpu_sriov_vf(tmp_adev))
5204                         amdgpu_amdkfd_pre_reset(tmp_adev);
5205
5206                 /*
5207                  * Mark these ASICs to be reset as untracked first,
5208                  * and add them back after the reset has completed.
5209                  */
5210                 amdgpu_unregister_gpu_instance(tmp_adev);
5211
5212                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5213
5214                 /* disable ras on ALL IPs */
5215                 if (!need_emergency_restart &&
5216                       amdgpu_device_ip_need_full_reset(tmp_adev))
5217                         amdgpu_ras_suspend(tmp_adev);
5218
5219                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5220                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5221
5222                         if (!ring || !ring->sched.thread)
5223                                 continue;
5224
5225                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5226
5227                         if (need_emergency_restart)
5228                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5229                 }
5230                 atomic_inc(&tmp_adev->gpu_reset_counter);
5231         }
5232
5233         if (need_emergency_restart)
5234                 goto skip_sched_resume;
5235
5236         /*
5237          * Must check guilty signal here since after this point all old
5238          * HW fences are force signaled.
5239          *
5240          * job->base holds a reference to parent fence
5241          */
5242         if (job && dma_fence_is_signaled(&job->hw_fence)) {
5243                 job_signaled = true;
5244                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5245                 goto skip_hw_reset;
5246         }
5247
5248 retry:  /* Rest of adevs pre asic reset from XGMI hive. */
5249         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5250                 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5251                 /*TODO Should we stop ?*/
5252                 if (r) {
5253                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5254                                   r, adev_to_drm(tmp_adev)->unique);
5255                         tmp_adev->asic_reset_res = r;
5256                 }
5257
5258                 /*
5259                  * Drop all pending non-scheduler resets. Scheduler resets
5260                  * were already dropped during drm_sched_stop.
5261                  */
5262                 amdgpu_device_stop_pending_resets(tmp_adev);
5263         }
5264
5265         tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
5266         /* Actual ASIC resets if needed. */
5267         /* Host driver will handle XGMI hive reset for SRIOV */
5268         if (amdgpu_sriov_vf(adev)) {
5269                 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5270                 if (r)
5271                         adev->asic_reset_res = r;
5272
5273                 /* Aldebaran supports ras in SRIOV, so need resume ras during reset */
5274                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
5275                         amdgpu_ras_resume(adev);
5276         } else {
5277                 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5278                 if (r && r == -EAGAIN)
5279                         goto retry;
5280         }
5281
5282 skip_hw_reset:
5283
5284         /* Post ASIC reset for all devs. */
5285         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5286
5287                 /*
5288                  * Sometimes a later bad compute job can block a good gfx job, as the gfx
5289                  * and compute rings share internal GC hardware. We add an additional
5290                  * guilty job recheck step to find the real guilty job: it synchronously
5291                  * resubmits and waits for the first pending job to be signaled. If that
5292                  * job times out, we identify it as the real guilty job.
5293                  */
5294                 if (amdgpu_gpu_recovery == 2 &&
5295                         !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5296                         amdgpu_device_recheck_guilty_jobs(
5297                                 tmp_adev, device_list_handle, reset_context);
5298
5299                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5300                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5301
5302                         if (!ring || !ring->sched.thread)
5303                                 continue;
5304
5305                         /* No point in resubmitting jobs if we didn't HW reset */
5306                         if (!tmp_adev->asic_reset_res && !job_signaled)
5307                                 drm_sched_resubmit_jobs(&ring->sched);
5308
5309                         drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5310                 }
5311
5312                 if (adev->enable_mes)
5313                         amdgpu_mes_self_test(tmp_adev);
5314
5315                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5316                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5317                 }
5318
5319                 if (tmp_adev->asic_reset_res)
5320                         r = tmp_adev->asic_reset_res;
5321
5322                 tmp_adev->asic_reset_res = 0;
5323
5324                 if (r) {
5325                         /* bad news, how to tell it to userspace ? */
5326                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5327                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5328                 } else {
5329                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5330                         if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5331                                 DRM_WARN("smart shift update failed\n");
5332                 }
5333         }
5334
5335 skip_sched_resume:
5336         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5337                 /* unlock kfd: SRIOV would do it separately */
5338                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5339                         amdgpu_amdkfd_post_reset(tmp_adev);
5340
5341                 /* kfd_post_reset will do nothing if the kfd device is not initialized,
5342                  * so bring up kfd here if it was not initialized before
5343                  */
5344                 if (!adev->kfd.init_complete)
5345                         amdgpu_amdkfd_device_init(adev);
5346
5347                 if (audio_suspended)
5348                         amdgpu_device_resume_display_audio(tmp_adev);
5349
5350                 amdgpu_device_unset_mp1_state(tmp_adev);
5351         }
5352
5353         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5354                                             reset_list);
5355         amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5356
5357         if (hive) {
5358                 mutex_unlock(&hive->hive_lock);
5359                 amdgpu_put_xgmi_hive(hive);
5360         }
5361
5362         if (r)
5363                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5364
5365         atomic_set(&adev->reset_domain->reset_res, r);
5366         return r;
5367 }
5368
5369 /**
5370  * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5371  *
5372  * @adev: amdgpu_device pointer
5373  *
5374  * Fetches and stores in the driver the PCIE capabilities (gen speed
5375  * and lanes) of the slot the device is in. Handles APUs and
5376  * virtualized environments where PCIE config space may not be available.
5377  */
5378 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5379 {
5380         struct pci_dev *pdev;
5381         enum pci_bus_speed speed_cap, platform_speed_cap;
5382         enum pcie_link_width platform_link_width;
5383
5384         if (amdgpu_pcie_gen_cap)
5385                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5386
5387         if (amdgpu_pcie_lane_cap)
5388                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5389
5390         /* covers APUs as well */
5391         if (pci_is_root_bus(adev->pdev->bus)) {
5392                 if (adev->pm.pcie_gen_mask == 0)
5393                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5394                 if (adev->pm.pcie_mlw_mask == 0)
5395                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5396                 return;
5397         }
5398
5399         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5400                 return;
5401
5402         pcie_bandwidth_available(adev->pdev, NULL,
5403                                  &platform_speed_cap, &platform_link_width);
5404
5405         if (adev->pm.pcie_gen_mask == 0) {
5406                 /* asic caps */
5407                 pdev = adev->pdev;
5408                 speed_cap = pcie_get_speed_cap(pdev);
5409                 if (speed_cap == PCI_SPEED_UNKNOWN) {
5410                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5411                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5412                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5413                 } else {
5414                         if (speed_cap == PCIE_SPEED_32_0GT)
5415                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5416                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5417                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5418                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5419                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5420                         else if (speed_cap == PCIE_SPEED_16_0GT)
5421                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5422                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5423                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5424                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5425                         else if (speed_cap == PCIE_SPEED_8_0GT)
5426                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5427                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5428                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5429                         else if (speed_cap == PCIE_SPEED_5_0GT)
5430                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5431                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5432                         else
5433                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5434                 }
5435                 /* platform caps */
5436                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5437                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5438                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5439                 } else {
5440                         if (platform_speed_cap == PCIE_SPEED_32_0GT)
5441                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5442                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5443                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5444                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5445                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5446                         else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5447                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5448                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5449                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5450                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5451                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5452                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5453                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5454                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5455                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5456                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5457                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5458                         else
5459                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5460
5461                 }
5462         }
5463         if (adev->pm.pcie_mlw_mask == 0) {
5464                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5465                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5466                 } else {
5467                         switch (platform_link_width) {
5468                         case PCIE_LNK_X32:
5469                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5470                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5471                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5472                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5473                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5474                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5475                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5476                                 break;
5477                         case PCIE_LNK_X16:
5478                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5479                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5480                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5481                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5482                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5483                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5484                                 break;
5485                         case PCIE_LNK_X12:
5486                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5487                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5488                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5489                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5490                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5491                                 break;
5492                         case PCIE_LNK_X8:
5493                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5494                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5495                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5496                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5497                                 break;
5498                         case PCIE_LNK_X4:
5499                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5500                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5501                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5502                                 break;
5503                         case PCIE_LNK_X2:
5504                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5505                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5506                                 break;
5507                         case PCIE_LNK_X1:
5508                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5509                                 break;
5510                         default:
5511                                 break;
5512                         }
5513                 }
5514         }
5515 }
5516
5517 /**
5518  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5519  *
5520  * @adev: amdgpu_device pointer
5521  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5522  *
5523  * Return true if @peer_adev can access (DMA) @adev through the PCIe
5524  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5525  * @peer_adev.
5526  */
5527 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5528                                       struct amdgpu_device *peer_adev)
5529 {
5530 #ifdef CONFIG_HSA_AMD_P2P
5531         uint64_t address_mask = peer_adev->dev->dma_mask ?
5532                 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5533         resource_size_t aper_limit =
5534                 adev->gmc.aper_base + adev->gmc.aper_size - 1;
5535         bool p2p_access = !adev->gmc.xgmi.connected_to_cpu &&
5536                           !(pci_p2pdma_distance_many(adev->pdev,
5537                                         &peer_adev->dev, 1, true) < 0);
5538
5539         return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5540                 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5541                 !(adev->gmc.aper_base & address_mask ||
5542                   aper_limit & address_mask));
5543 #else
5544         return false;
5545 #endif
5546 }
5547
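/**
 * amdgpu_device_baco_enter - put the ASIC into the BACO power state
 *
 * @dev: drm_device pointer
 *
 * Disables the doorbell interrupt if RAS is enabled, then asks the power
 * management code to enter BACO.
 * Returns 0 on success, negative error code on failure.
 */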
5548 int amdgpu_device_baco_enter(struct drm_device *dev)
5549 {
5550         struct amdgpu_device *adev = drm_to_adev(dev);
5551         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5552
5553         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5554                 return -ENOTSUPP;
5555
5556         if (ras && adev->ras_enabled &&
5557             adev->nbio.funcs->enable_doorbell_interrupt)
5558                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5559
5560         return amdgpu_dpm_baco_enter(adev);
5561 }
5562
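/**
 * amdgpu_device_baco_exit - exit the BACO (Bus Active, Chip Off) state
 *
 * @dev: drm_device pointer
 *
 * Ask the DPM code to bring the device out of BACO, re-enable the RAS
 * doorbell interrupt if RAS is enabled, and clear any stale doorbell
 * interrupt when running in passthrough.
 *
 * Return: 0 on success, -ENOTSUPP if the device does not support BACO,
 * or a negative error code on failure.
 */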
5563 int amdgpu_device_baco_exit(struct drm_device *dev)
5564 {
5565         struct amdgpu_device *adev = drm_to_adev(dev);
5566         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5567         int ret = 0;
5568
5569         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5570                 return -ENOTSUPP;
5571
5572         ret = amdgpu_dpm_baco_exit(adev);
5573         if (ret)
5574                 return ret;
5575
5576         if (ras && adev->ras_enabled &&
5577             adev->nbio.funcs->enable_doorbell_interrupt)
5578                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5579
5580         if (amdgpu_passthrough(adev) &&
5581             adev->nbio.funcs->clear_doorbell_interrupt)
5582                 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5583
5584         return 0;
5585 }
5586
5587 /**
5588  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5589  * @pdev: PCI device struct
5590  * @state: PCI channel state
5591  *
5592  * Description: Called when a PCI error is detected.
5593  *
5594  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5595  */
5596 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5597 {
5598         struct drm_device *dev = pci_get_drvdata(pdev);
5599         struct amdgpu_device *adev = drm_to_adev(dev);
5600         int i;
5601
5602         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5603
5604         if (adev->gmc.xgmi.num_physical_nodes > 1) {
5605                 DRM_WARN("No support for XGMI hive yet...");
5606                 return PCI_ERS_RESULT_DISCONNECT;
5607         }
5608
5609         adev->pci_channel_state = state;
5610
5611         switch (state) {
5612         case pci_channel_io_normal:
5613                 return PCI_ERS_RESULT_CAN_RECOVER;
5614         /* Fatal error, prepare for slot reset */
5615         case pci_channel_io_frozen:
5616                 /*
5617                  * Locking adev->reset_domain->sem will prevent any external access
5618                  * to GPU during PCI error recovery
5619                  */
5620                 amdgpu_device_lock_reset_domain(adev->reset_domain);
5621                 amdgpu_device_set_mp1_state(adev);
5622
5623                 /*
5624                  * Block any work scheduling as we do for regular GPU reset
5625                  * for the duration of the recovery
5626                  */
5627                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5628                         struct amdgpu_ring *ring = adev->rings[i];
5629
5630                         if (!ring || !ring->sched.thread)
5631                                 continue;
5632
5633                         drm_sched_stop(&ring->sched, NULL);
5634                 }
5635                 atomic_inc(&adev->gpu_reset_counter);
5636                 return PCI_ERS_RESULT_NEED_RESET;
5637         case pci_channel_io_perm_failure:
5638                 /* Permanent error, prepare for device removal */
5639                 return PCI_ERS_RESULT_DISCONNECT;
5640         }
5641
5642         return PCI_ERS_RESULT_NEED_RESET;
5643 }
5644
5645 /**
5646  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5647  * @pdev: pointer to PCI device
5648  */
5649 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5650 {
5651
5652         DRM_INFO("PCI error: mmio enabled callback!!\n");
5653
5654         /* TODO - dump whatever for debugging purposes */
5655
5656         /* This is called only if amdgpu_pci_error_detected returns
5657          * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5658          * works, so there is no need to reset the slot.
5659          */
5660
5661         return PCI_ERS_RESULT_RECOVERED;
5662 }
5663
5664 /**
5665  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5666  * @pdev: PCI device struct
5667  *
5668  * Description: This routine is called by the PCI error recovery
5669  * code after the PCI slot has been reset, just before we
5670  * should resume normal operations.
5671  */
5672 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5673 {
5674         struct drm_device *dev = pci_get_drvdata(pdev);
5675         struct amdgpu_device *adev = drm_to_adev(dev);
5676         int r, i;
5677         struct amdgpu_reset_context reset_context;
5678         u32 memsize;
5679         struct list_head device_list;
5680
5681         DRM_INFO("PCI error: slot reset callback!!\n");
5682
5683         memset(&reset_context, 0, sizeof(reset_context));
5684
5685         INIT_LIST_HEAD(&device_list);
5686         list_add_tail(&adev->reset_list, &device_list);
5687
5688         /* wait for asic to come out of reset */
5689         msleep(500);
5690
5691         /* Restore PCI config space */
5692         amdgpu_device_load_pci_state(pdev);
5693
5694         /* confirm ASIC came out of reset */
5695         for (i = 0; i < adev->usec_timeout; i++) {
5696                 memsize = amdgpu_asic_get_config_memsize(adev);
5697
5698                 if (memsize != 0xffffffff)
5699                         break;
5700                 udelay(1);
5701         }
5702         if (memsize == 0xffffffff) {
5703                 r = -ETIME;
5704                 goto out;
5705         }
5706
5707         reset_context.method = AMD_RESET_METHOD_NONE;
5708         reset_context.reset_req_dev = adev;
5709         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5710         set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5711
5712         adev->no_hw_access = true;
5713         r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5714         adev->no_hw_access = false;
5715         if (r)
5716                 goto out;
5717
5718         r = amdgpu_do_asic_reset(&device_list, &reset_context);
5719
5720 out:
5721         if (!r) {
5722                 if (amdgpu_device_cache_pci_state(adev->pdev))
5723                         pci_restore_state(adev->pdev);
5724
5725                 DRM_INFO("PCIe error recovery succeeded\n");
5726         } else {
5727                 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5728                 amdgpu_device_unset_mp1_state(adev);
5729                 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5730         }
5731
5732         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5733 }
5734
5735 /**
5736  * amdgpu_pci_resume() - resume normal ops after PCI reset
5737  * @pdev: pointer to PCI device
5738  *
5739  * Called when the error recovery driver tells us that it is
5740  * OK to resume normal operation.
5741  */
5742 void amdgpu_pci_resume(struct pci_dev *pdev)
5743 {
5744         struct drm_device *dev = pci_get_drvdata(pdev);
5745         struct amdgpu_device *adev = drm_to_adev(dev);
5746         int i;
5747
5748
5749         DRM_INFO("PCI error: resume callback!!\n");
5750
5751         /* Only continue execution for the case of pci_channel_io_frozen */
5752         if (adev->pci_channel_state != pci_channel_io_frozen)
5753                 return;
5754
5755         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5756                 struct amdgpu_ring *ring = adev->rings[i];
5757
5758                 if (!ring || !ring->sched.thread)
5759                         continue;
5760
5761
5762                 drm_sched_resubmit_jobs(&ring->sched);
5763                 drm_sched_start(&ring->sched, true);
5764         }
5765
5766         amdgpu_device_unset_mp1_state(adev);
5767         amdgpu_device_unlock_reset_domain(adev->reset_domain);
5768 }
5769
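/**
 * amdgpu_device_cache_pci_state - save and cache the PCI config space
 *
 * @pdev: PCI device struct
 *
 * Save the device's PCI configuration space and keep a copy in
 * adev->pci_state so it can be restored after a reset.
 *
 * Return: true on success, false if saving or storing the state failed.
 */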
5770 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5771 {
5772         struct drm_device *dev = pci_get_drvdata(pdev);
5773         struct amdgpu_device *adev = drm_to_adev(dev);
5774         int r;
5775
5776         r = pci_save_state(pdev);
5777         if (!r) {
5778                 kfree(adev->pci_state);
5779
5780                 adev->pci_state = pci_store_saved_state(pdev);
5781
5782                 if (!adev->pci_state) {
5783                         DRM_ERROR("Failed to store PCI saved state");
5784                         return false;
5785                 }
5786         } else {
5787                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5788                 return false;
5789         }
5790
5791         return true;
5792 }
5793
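/**
 * amdgpu_device_load_pci_state - restore the cached PCI config space
 *
 * @pdev: PCI device struct
 *
 * Load the configuration space previously cached in adev->pci_state and
 * restore it to the device.
 *
 * Return: true on success, false if no cached state exists or the load fails.
 */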
5794 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5795 {
5796         struct drm_device *dev = pci_get_drvdata(pdev);
5797         struct amdgpu_device *adev = drm_to_adev(dev);
5798         int r;
5799
5800         if (!adev->pci_state)
5801                 return false;
5802
5803         r = pci_load_saved_state(pdev, adev->pci_state);
5804
5805         if (!r) {
5806                 pci_restore_state(pdev);
5807         } else {
5808                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5809                 return false;
5810         }
5811
5812         return true;
5813 }
5814
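/**
 * amdgpu_device_flush_hdp - flush the HDP (Host Data Path) cache
 *
 * @adev: amdgpu_device pointer
 * @ring: ring to emit the flush on, or NULL to flush via MMIO
 *
 * Flush the HDP cache so that host writes to VRAM are visible to the GPU.
 * The flush is skipped on APUs (unless running in passthrough) and on
 * devices whose VRAM is directly connected to the CPU, where it is not
 * needed.
 */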
5815 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5816                 struct amdgpu_ring *ring)
5817 {
5818 #ifdef CONFIG_X86_64
5819         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5820                 return;
5821 #endif
5822         if (adev->gmc.xgmi.connected_to_cpu)
5823                 return;
5824
5825         if (ring && ring->funcs->emit_hdp_flush)
5826                 amdgpu_ring_emit_hdp_flush(ring);
5827         else
5828                 amdgpu_asic_flush_hdp(adev, ring);
5829 }
5830
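/**
 * amdgpu_device_invalidate_hdp - invalidate the HDP (Host Data Path) cache
 *
 * @adev: amdgpu_device pointer
 * @ring: ring associated with the invalidation, may be NULL
 *
 * Invalidate the HDP cache so that stale data is not read through it.
 * Skipped on APUs (unless running in passthrough) and on devices whose
 * VRAM is directly connected to the CPU.
 */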
5831 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5832                 struct amdgpu_ring *ring)
5833 {
5834 #ifdef CONFIG_X86_64
5835         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5836                 return;
5837 #endif
5838         if (adev->gmc.xgmi.connected_to_cpu)
5839                 return;
5840
5841         amdgpu_asic_invalidate_hdp(adev, ring);
5842 }
5843
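/**
 * amdgpu_in_reset - check whether a GPU reset is in progress
 *
 * @adev: amdgpu_device pointer
 *
 * Return: non-zero if the device's reset domain is currently performing
 * a GPU reset, 0 otherwise.
 */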
5844 int amdgpu_in_reset(struct amdgpu_device *adev)
5845 {
5846         return atomic_read(&adev->reset_domain->in_gpu_reset);
5847 }
5848
5849 /**
5850  * amdgpu_device_halt() - bring hardware to some kind of halt state
5851  *
5852  * @adev: amdgpu_device pointer
5853  *
5854  * Bring the hardware to some kind of halt state so that no one can touch
5855  * it any more. This helps maintain the error context when an error occurs.
5856  * Compared to a simple hang, the system will remain stable, at least for
5857  * SSH access. It should then be trivial to inspect the hardware state and
5858  * see what's going on. Implemented as follows:
5859  *
5860  * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc.),
5861  *    clears all CPU mappings to the device, and disallows remappings through page faults
5862  * 2. amdgpu_irq_disable_all() disables all interrupts
5863  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5864  * 4. set adev->no_hw_access to avoid potential crashes after step 5
5865  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5866  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5867  *    flush any in-flight DMA operations
5868  */
5869 void amdgpu_device_halt(struct amdgpu_device *adev)
5870 {
5871         struct pci_dev *pdev = adev->pdev;
5872         struct drm_device *ddev = adev_to_drm(adev);
5873
5874         drm_dev_unplug(ddev);
5875
5876         amdgpu_irq_disable_all(adev);
5877
5878         amdgpu_fence_driver_hw_fini(adev);
5879
5880         adev->no_hw_access = true;
5881
5882         amdgpu_device_unmap_mmio(adev);
5883
5884         pci_disable_device(pdev);
5885         pci_wait_for_pending_transaction(pdev);
5886 }
5887
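/**
 * amdgpu_device_pcie_port_rreg - read a PCIe port register
 *
 * @adev: amdgpu_device pointer
 * @reg: register dword offset
 *
 * Read a PCIe port register through the NBIO index/data pair while
 * holding the pcie_idx_lock.
 *
 * Return: the 32-bit register value.
 */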
5888 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5889                                 u32 reg)
5890 {
5891         unsigned long flags, address, data;
5892         u32 r;
5893
5894         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5895         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5896
5897         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5898         WREG32(address, reg * 4);
5899         (void)RREG32(address);
5900         r = RREG32(data);
5901         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5902         return r;
5903 }
5904
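/**
 * amdgpu_device_pcie_port_wreg - write a PCIe port register
 *
 * @adev: amdgpu_device pointer
 * @reg: register dword offset
 * @v: 32-bit value to write
 *
 * Write a PCIe port register through the NBIO index/data pair while
 * holding the pcie_idx_lock, reading back to post the write.
 */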
5905 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
5906                                 u32 reg, u32 v)
5907 {
5908         unsigned long flags, address, data;
5909
5910         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5911         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5912
5913         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5914         WREG32(address, reg * 4);
5915         (void)RREG32(address);
5916         WREG32(data, v);
5917         (void)RREG32(data);
5918         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5919 }