1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
35 #include <linux/devcoredump.h>
36 #include <generated/utsrelease.h>
37 #include <linux/pci-p2pdma.h>
38
39 #include <drm/drm_aperture.h>
40 #include <drm/drm_atomic_helper.h>
41 #include <drm/drm_fb_helper.h>
42 #include <drm/drm_probe_helper.h>
43 #include <drm/amdgpu_drm.h>
44 #include <linux/vgaarb.h>
45 #include <linux/vga_switcheroo.h>
46 #include <linux/efi.h>
47 #include "amdgpu.h"
48 #include "amdgpu_trace.h"
49 #include "amdgpu_i2c.h"
50 #include "atom.h"
51 #include "amdgpu_atombios.h"
52 #include "amdgpu_atomfirmware.h"
53 #include "amd_pcie.h"
54 #ifdef CONFIG_DRM_AMDGPU_SI
55 #include "si.h"
56 #endif
57 #ifdef CONFIG_DRM_AMDGPU_CIK
58 #include "cik.h"
59 #endif
60 #include "vi.h"
61 #include "soc15.h"
62 #include "nv.h"
63 #include "bif/bif_4_1_d.h"
64 #include <linux/firmware.h>
65 #include "amdgpu_vf_error.h"
66
67 #include "amdgpu_amdkfd.h"
68 #include "amdgpu_pm.h"
69
70 #include "amdgpu_xgmi.h"
71 #include "amdgpu_ras.h"
72 #include "amdgpu_pmu.h"
73 #include "amdgpu_fru_eeprom.h"
74 #include "amdgpu_reset.h"
75
76 #include <linux/suspend.h>
77 #include <drm/task_barrier.h>
78 #include <linux/pm_runtime.h>
79
80 #include <drm/drm_drv.h>
81
82 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
87 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
88 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
89
90 #define AMDGPU_RESUME_MS                2000
91 #define AMDGPU_MAX_RETRY_LIMIT          2
92 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
93
94 static const struct drm_driver amdgpu_kms_driver;
95
96 const char *amdgpu_asic_name[] = {
97         "TAHITI",
98         "PITCAIRN",
99         "VERDE",
100         "OLAND",
101         "HAINAN",
102         "BONAIRE",
103         "KAVERI",
104         "KABINI",
105         "HAWAII",
106         "MULLINS",
107         "TOPAZ",
108         "TONGA",
109         "FIJI",
110         "CARRIZO",
111         "STONEY",
112         "POLARIS10",
113         "POLARIS11",
114         "POLARIS12",
115         "VEGAM",
116         "VEGA10",
117         "VEGA12",
118         "VEGA20",
119         "RAVEN",
120         "ARCTURUS",
121         "RENOIR",
122         "ALDEBARAN",
123         "NAVI10",
124         "CYAN_SKILLFISH",
125         "NAVI14",
126         "NAVI12",
127         "SIENNA_CICHLID",
128         "NAVY_FLOUNDER",
129         "VANGOGH",
130         "DIMGREY_CAVEFISH",
131         "BEIGE_GOBY",
132         "YELLOW_CARP",
133         "IP DISCOVERY",
134         "LAST",
135 };
136
137 /**
138  * DOC: pcie_replay_count
139  *
140  * The amdgpu driver provides a sysfs API for reporting the total number
141  * of PCIe replays (NAKs)
142  * The file pcie_replay_count is used for this and returns the total
143  * number of replays as a sum of the NAKs generated and NAKs received
144  */
145
146 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
147                 struct device_attribute *attr, char *buf)
148 {
149         struct drm_device *ddev = dev_get_drvdata(dev);
150         struct amdgpu_device *adev = drm_to_adev(ddev);
151         uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
152
153         return sysfs_emit(buf, "%llu\n", cnt);
154 }
155
156 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
157                 amdgpu_device_get_pcie_replay_count, NULL);
158
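/*
 * Illustrative sketch (hypothetical helper, not the driver's actual
 * registration path): a DEVICE_ATTR() such as dev_attr_pcie_replay_count
 * above is exposed by attaching it to the underlying struct device, e.g.
 * with device_create_file().
 */
static int __maybe_unused amdgpu_example_expose_pcie_replay_count(struct amdgpu_device *adev)
{
        /* creates the pcie_replay_count file backed by the show callback above */
        return device_create_file(adev->dev, &dev_attr_pcie_replay_count);
}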
159 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
160
161 /**
162  * DOC: product_name
163  *
164  * The amdgpu driver provides a sysfs API for reporting the product name
165  * for the device
166  * The file product_name is used for this and returns the product name
167  * as returned from the FRU.
168  * NOTE: This is only available for certain server cards
169  */
170
171 static ssize_t amdgpu_device_get_product_name(struct device *dev,
172                 struct device_attribute *attr, char *buf)
173 {
174         struct drm_device *ddev = dev_get_drvdata(dev);
175         struct amdgpu_device *adev = drm_to_adev(ddev);
176
177         return sysfs_emit(buf, "%s\n", adev->product_name);
178 }
179
180 static DEVICE_ATTR(product_name, S_IRUGO,
181                 amdgpu_device_get_product_name, NULL);
182
183 /**
184  * DOC: product_number
185  *
186  * The amdgpu driver provides a sysfs API for reporting the part number
187  * for the device
189  * The file product_number is used for this and returns the part number
189  * as returned from the FRU.
190  * NOTE: This is only available for certain server cards
191  */
192
193 static ssize_t amdgpu_device_get_product_number(struct device *dev,
194                 struct device_attribute *attr, char *buf)
195 {
196         struct drm_device *ddev = dev_get_drvdata(dev);
197         struct amdgpu_device *adev = drm_to_adev(ddev);
198
199         return sysfs_emit(buf, "%s\n", adev->product_number);
200 }
201
202 static DEVICE_ATTR(product_number, S_IRUGO,
203                 amdgpu_device_get_product_number, NULL);
204
205 /**
206  * DOC: serial_number
207  *
208  * The amdgpu driver provides a sysfs API for reporting the serial number
209  * for the device
210  * The file serial_number is used for this and returns the serial number
211  * as returned from the FRU.
212  * NOTE: This is only available for certain server cards
213  */
214
215 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
216                 struct device_attribute *attr, char *buf)
217 {
218         struct drm_device *ddev = dev_get_drvdata(dev);
219         struct amdgpu_device *adev = drm_to_adev(ddev);
220
221         return sysfs_emit(buf, "%s\n", adev->serial);
222 }
223
224 static DEVICE_ATTR(serial_number, S_IRUGO,
225                 amdgpu_device_get_serial_number, NULL);
226
227 /**
228  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
229  *
230  * @dev: drm_device pointer
231  *
232  * Returns true if the device is a dGPU with ATPX power control,
233  * otherwise returns false.
234  */
235 bool amdgpu_device_supports_px(struct drm_device *dev)
236 {
237         struct amdgpu_device *adev = drm_to_adev(dev);
238
239         if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
240                 return true;
241         return false;
242 }
243
244 /**
245  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
246  *
247  * @dev: drm_device pointer
248  *
249  * Returns true if the device is a dGPU with ACPI power control,
250  * otherwise returns false.
251  */
252 bool amdgpu_device_supports_boco(struct drm_device *dev)
253 {
254         struct amdgpu_device *adev = drm_to_adev(dev);
255
256         if (adev->has_pr3 ||
257             ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
258                 return true;
259         return false;
260 }
261
262 /**
263  * amdgpu_device_supports_baco - Does the device support BACO
264  *
265  * @dev: drm_device pointer
266  *
267  * Returns true if the device supports BACO,
268  * otherwise returns false.
269  */
270 bool amdgpu_device_supports_baco(struct drm_device *dev)
271 {
272         struct amdgpu_device *adev = drm_to_adev(dev);
273
274         return amdgpu_asic_supports_baco(adev);
275 }
276
277 /**
278  * amdgpu_device_supports_smart_shift - Is the device dGPU with
279  * smart shift support
280  *
281  * @dev: drm_device pointer
282  *
283  * Returns true if the device is a dGPU with Smart Shift support,
284  * otherwise returns false.
285  */
286 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
287 {
288         return (amdgpu_device_supports_boco(dev) &&
289                 amdgpu_acpi_is_power_shift_control_supported());
290 }
291
292 /*
293  * VRAM access helper functions
294  */
295
296 /**
297  * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
298  *
299  * @adev: amdgpu_device pointer
300  * @pos: offset of the buffer in vram
301  * @buf: virtual address of the buffer in system memory
302  * @size: read/write size; the buffer at @buf must be at least @size bytes
303  * @write: true - write to vram, otherwise - read from vram
304  */
305 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
306                              void *buf, size_t size, bool write)
307 {
308         unsigned long flags;
309         uint32_t hi = ~0, tmp = 0;
310         uint32_t *data = buf;
311         uint64_t last;
312         int idx;
313
314         if (!drm_dev_enter(adev_to_drm(adev), &idx))
315                 return;
316
317         BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
318
319         spin_lock_irqsave(&adev->mmio_idx_lock, flags);
320         for (last = pos + size; pos < last; pos += 4) {
321                 tmp = pos >> 31;
322
323                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
324                 if (tmp != hi) {
325                         WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
326                         hi = tmp;
327                 }
328                 if (write)
329                         WREG32_NO_KIQ(mmMM_DATA, *data++);
330                 else
331                         *data++ = RREG32_NO_KIQ(mmMM_DATA);
332         }
333
334         spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
335         drm_dev_exit(idx);
336 }
337
338 /**
339  * amdgpu_device_aper_access - access vram through the vram aperture
340  *
341  * @adev: amdgpu_device pointer
342  * @pos: offset of the buffer in vram
343  * @buf: virtual address of the buffer in system memory
344  * @size: read/write size; the buffer at @buf must be at least @size bytes
345  * @write: true - write to vram, otherwise - read from vram
346  *
347  * Returns the number of bytes that have been transferred.
348  */
349 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
350                                  void *buf, size_t size, bool write)
351 {
352 #ifdef CONFIG_64BIT
353         void __iomem *addr;
354         size_t count = 0;
355         uint64_t last;
356
357         if (!adev->mman.aper_base_kaddr)
358                 return 0;
359
360         last = min(pos + size, adev->gmc.visible_vram_size);
361         if (last > pos) {
362                 addr = adev->mman.aper_base_kaddr + pos;
363                 count = last - pos;
364
365                 if (write) {
366                         memcpy_toio(addr, buf, count);
367                         mb();
368                         amdgpu_device_flush_hdp(adev, NULL);
369                 } else {
370                         amdgpu_device_invalidate_hdp(adev, NULL);
371                         mb();
372                         memcpy_fromio(buf, addr, count);
373                 }
374
375         }
376
377         return count;
378 #else
379         return 0;
380 #endif
381 }
382
383 /**
384  * amdgpu_device_vram_access - read/write a buffer in vram
385  *
386  * @adev: amdgpu_device pointer
387  * @pos: offset of the buffer in vram
388  * @buf: virtual address of the buffer in system memory
389  * @size: read/write size; the buffer at @buf must be at least @size bytes
390  * @write: true - write to vram, otherwise - read from vram
391  */
392 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
393                                void *buf, size_t size, bool write)
394 {
395         size_t count;
396
397         /* try using the vram aperture to access vram first */
398         count = amdgpu_device_aper_access(adev, pos, buf, size, write);
399         size -= count;
400         if (size) {
401                 /* use MM_INDEX/MM_DATA to access the rest of vram */
402                 pos += count;
403                 buf += count;
404                 amdgpu_device_mm_access(adev, pos, buf, size, write);
405         }
406 }
407
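/*
 * Illustrative sketch (hypothetical helper): reading a few dwords from VRAM
 * with amdgpu_device_vram_access().  The helper transparently prefers the
 * CPU-visible aperture and falls back to MM_INDEX/MM_DATA for whatever is
 * left, so callers only need the one entry point.
 */
static void __maybe_unused amdgpu_example_dump_vram(struct amdgpu_device *adev,
                                                    loff_t pos)
{
        uint32_t data[4] = {};

        /* pos and size must be dword aligned for the MM fallback path */
        amdgpu_device_vram_access(adev, pos, data, sizeof(data), false);
        dev_info(adev->dev, "VRAM at 0x%llx: %08x %08x %08x %08x\n",
                 (unsigned long long)pos, data[0], data[1], data[2], data[3]);
}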
408 /*
409  * register access helper functions.
410  */
411
412 /* Check if hw access should be skipped because of hotplug or device error */
413 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
414 {
415         if (adev->no_hw_access)
416                 return true;
417
418 #ifdef CONFIG_LOCKDEP
419         /*
420          * This is a bit complicated to understand, so worth a comment. What we assert
421          * here is that the GPU reset is not running on another thread in parallel.
422          *
423          * For this we trylock the read side of the reset semaphore, if that succeeds
424          * we know that the reset is not running in parallel.
425          *
426          * If the trylock fails we assert that we are either already holding the read
427          * side of the lock or are the reset thread itself and hold the write side of
428          * the lock.
429          */
430         if (in_task()) {
431                 if (down_read_trylock(&adev->reset_domain->sem))
432                         up_read(&adev->reset_domain->sem);
433                 else
434                         lockdep_assert_held(&adev->reset_domain->sem);
435         }
436 #endif
437         return false;
438 }
439
440 /**
441  * amdgpu_device_rreg - read a memory mapped IO or indirect register
442  *
443  * @adev: amdgpu_device pointer
444  * @reg: dword aligned register offset
445  * @acc_flags: access flags which require special behavior
446  *
447  * Returns the 32 bit value from the offset specified.
448  */
449 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
450                             uint32_t reg, uint32_t acc_flags)
451 {
452         uint32_t ret;
453
454         if (amdgpu_device_skip_hw_access(adev))
455                 return 0;
456
457         if ((reg * 4) < adev->rmmio_size) {
458                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
459                     amdgpu_sriov_runtime(adev) &&
460                     down_read_trylock(&adev->reset_domain->sem)) {
461                         ret = amdgpu_kiq_rreg(adev, reg);
462                         up_read(&adev->reset_domain->sem);
463                 } else {
464                         ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
465                 }
466         } else {
467                 ret = adev->pcie_rreg(adev, reg * 4);
468         }
469
470         trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
471
472         return ret;
473 }
474
475 /*
476  * MMIO register byte read helper function
477  * @offset: byte offset from MMIO start
478  *
479  */
480
481 /**
482  * amdgpu_mm_rreg8 - read a memory mapped IO register
483  *
484  * @adev: amdgpu_device pointer
485  * @offset: byte aligned register offset
486  *
487  * Returns the 8 bit value from the offset specified.
488  */
489 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
490 {
491         if (amdgpu_device_skip_hw_access(adev))
492                 return 0;
493
494         if (offset < adev->rmmio_size)
495                 return (readb(adev->rmmio + offset));
496         BUG();
497 }
498
499 /*
500  * MMIO register byte write helper function
501  * @offset: byte offset from MMIO start
502  * @value: the value to be written to the register
503  *
504  */
505 /**
506  * amdgpu_mm_wreg8 - write a memory mapped IO register
507  *
508  * @adev: amdgpu_device pointer
509  * @offset: byte aligned register offset
510  * @value: 8 bit value to write
511  *
512  * Writes the value specified to the offset specified.
513  */
514 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
515 {
516         if (amdgpu_device_skip_hw_access(adev))
517                 return;
518
519         if (offset < adev->rmmio_size)
520                 writeb(value, adev->rmmio + offset);
521         else
522                 BUG();
523 }
524
525 /**
526  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
527  *
528  * @adev: amdgpu_device pointer
529  * @reg: dword aligned register offset
530  * @v: 32 bit value to write to the register
531  * @acc_flags: access flags which require special behavior
532  *
533  * Writes the value specified to the offset specified.
534  */
535 void amdgpu_device_wreg(struct amdgpu_device *adev,
536                         uint32_t reg, uint32_t v,
537                         uint32_t acc_flags)
538 {
539         if (amdgpu_device_skip_hw_access(adev))
540                 return;
541
542         if ((reg * 4) < adev->rmmio_size) {
543                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
544                     amdgpu_sriov_runtime(adev) &&
545                     down_read_trylock(&adev->reset_domain->sem)) {
546                         amdgpu_kiq_wreg(adev, reg, v);
547                         up_read(&adev->reset_domain->sem);
548                 } else {
549                         writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
550                 }
551         } else {
552                 adev->pcie_wreg(adev, reg * 4, v);
553         }
554
555         trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
556 }
557
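/*
 * Illustrative sketch (hypothetical helper): a read-modify-write sequence
 * built on the accessors above.  Most driver code reaches them through the
 * RREG32()/WREG32() convenience macros, which pass acc_flags == 0.
 */
static void __maybe_unused amdgpu_example_rmw(struct amdgpu_device *adev,
                                              uint32_t reg, uint32_t mask,
                                              uint32_t bits)
{
        uint32_t tmp;

        tmp = amdgpu_device_rreg(adev, reg, 0);
        tmp = (tmp & ~mask) | (bits & mask);
        amdgpu_device_wreg(adev, reg, tmp, 0);
}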
558 /**
559  * amdgpu_mm_wreg_mmio_rlc - write a register through direct/indirect mmio or through the RLC path if the offset is in the RLC range
560  *
561  * @adev: amdgpu_device pointer
562  * @reg: mmio/rlc register
563  * @v: value to write
564  *
565  * This function is invoked only for debugfs register access.
566  */
567 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
568                              uint32_t reg, uint32_t v)
569 {
570         if (amdgpu_device_skip_hw_access(adev))
571                 return;
572
573         if (amdgpu_sriov_fullaccess(adev) &&
574             adev->gfx.rlc.funcs &&
575             adev->gfx.rlc.funcs->is_rlcg_access_range) {
576                 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
577                         return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
578         } else if ((reg * 4) >= adev->rmmio_size) {
579                 adev->pcie_wreg(adev, reg * 4, v);
580         } else {
581                 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
582         }
583 }
584
585 /**
586  * amdgpu_mm_rdoorbell - read a doorbell dword
587  *
588  * @adev: amdgpu_device pointer
589  * @index: doorbell index
590  *
591  * Returns the value in the doorbell aperture at the
592  * requested doorbell index (CIK).
593  */
594 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
595 {
596         if (amdgpu_device_skip_hw_access(adev))
597                 return 0;
598
599         if (index < adev->doorbell.num_doorbells) {
600                 return readl(adev->doorbell.ptr + index);
601         } else {
602                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
603                 return 0;
604         }
605 }
606
607 /**
608  * amdgpu_mm_wdoorbell - write a doorbell dword
609  *
610  * @adev: amdgpu_device pointer
611  * @index: doorbell index
612  * @v: value to write
613  *
614  * Writes @v to the doorbell aperture at the
615  * requested doorbell index (CIK).
616  */
617 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
618 {
619         if (amdgpu_device_skip_hw_access(adev))
620                 return;
621
622         if (index < adev->doorbell.num_doorbells) {
623                 writel(v, adev->doorbell.ptr + index);
624         } else {
625                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
626         }
627 }
628
629 /**
630  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
631  *
632  * @adev: amdgpu_device pointer
633  * @index: doorbell index
634  *
635  * Returns the value in the doorbell aperture at the
636  * requested doorbell index (VEGA10+).
637  */
638 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
639 {
640         if (amdgpu_device_skip_hw_access(adev))
641                 return 0;
642
643         if (index < adev->doorbell.num_doorbells) {
644                 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
645         } else {
646                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
647                 return 0;
648         }
649 }
650
651 /**
652  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
653  *
654  * @adev: amdgpu_device pointer
655  * @index: doorbell index
656  * @v: value to write
657  *
658  * Writes @v to the doorbell aperture at the
659  * requested doorbell index (VEGA10+).
660  */
661 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
662 {
663         if (amdgpu_device_skip_hw_access(adev))
664                 return;
665
666         if (index < adev->doorbell.num_doorbells) {
667                 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
668         } else {
669                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
670         }
671 }
672
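/*
 * Illustrative sketch (hypothetical helper): ring code normally rings its
 * doorbell through wrapper macros that resolve to the helpers above; shown
 * here as direct calls.  64 bit doorbell writes are available on VEGA10 and
 * newer (see the kernel-doc above); whether a given ring uses them depends
 * on how that ring is set up.
 */
static void __maybe_unused amdgpu_example_ring_doorbell(struct amdgpu_device *adev,
                                                        u32 doorbell_index,
                                                        u64 wptr)
{
        if (adev->asic_type >= CHIP_VEGA10)
                amdgpu_mm_wdoorbell64(adev, doorbell_index, wptr);
        else
                amdgpu_mm_wdoorbell(adev, doorbell_index, lower_32_bits(wptr));
}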
673 /**
674  * amdgpu_device_indirect_rreg - read an indirect register
675  *
676  * @adev: amdgpu_device pointer
677  * @pcie_index: mmio register offset
678  * @pcie_data: mmio register offset
679  * @reg_addr: indirect register address to read from
680  *
681  * Returns the value of indirect register @reg_addr
682  */
683 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
684                                 u32 pcie_index, u32 pcie_data,
685                                 u32 reg_addr)
686 {
687         unsigned long flags;
688         u32 r;
689         void __iomem *pcie_index_offset;
690         void __iomem *pcie_data_offset;
691
692         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
693         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
694         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
695
696         writel(reg_addr, pcie_index_offset);
697         readl(pcie_index_offset);
698         r = readl(pcie_data_offset);
699         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
700
701         return r;
702 }
703
704 /**
705  * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
706  *
707  * @adev: amdgpu_device pointer
708  * @pcie_index: mmio register offset
709  * @pcie_data: mmio register offset
710  * @reg_addr: indirect register address to read from
711  *
712  * Returns the value of indirect register @reg_addr
713  */
714 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
715                                   u32 pcie_index, u32 pcie_data,
716                                   u32 reg_addr)
717 {
718         unsigned long flags;
719         u64 r;
720         void __iomem *pcie_index_offset;
721         void __iomem *pcie_data_offset;
722
723         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
724         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
725         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
726
727         /* read low 32 bits */
728         writel(reg_addr, pcie_index_offset);
729         readl(pcie_index_offset);
730         r = readl(pcie_data_offset);
731         /* read high 32 bits */
732         writel(reg_addr + 4, pcie_index_offset);
733         readl(pcie_index_offset);
734         r |= ((u64)readl(pcie_data_offset) << 32);
735         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
736
737         return r;
738 }
739
740 /**
741  * amdgpu_device_indirect_wreg - write an indirect register
742  *
743  * @adev: amdgpu_device pointer
744  * @pcie_index: mmio register offset
745  * @pcie_data: mmio register offset
746  * @reg_addr: indirect register offset
747  * @reg_data: indirect register data
748  *
749  */
750 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
751                                  u32 pcie_index, u32 pcie_data,
752                                  u32 reg_addr, u32 reg_data)
753 {
754         unsigned long flags;
755         void __iomem *pcie_index_offset;
756         void __iomem *pcie_data_offset;
757
758         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
759         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
760         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
761
762         writel(reg_addr, pcie_index_offset);
763         readl(pcie_index_offset);
764         writel(reg_data, pcie_data_offset);
765         readl(pcie_data_offset);
766         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
767 }
768
769 /**
770  * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register
771  *
772  * @adev: amdgpu_device pointer
773  * @pcie_index: mmio register offset
774  * @pcie_data: mmio register offset
775  * @reg_addr: indirect register offset
776  * @reg_data: indirect register data
777  *
778  */
779 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
780                                    u32 pcie_index, u32 pcie_data,
781                                    u32 reg_addr, u64 reg_data)
782 {
783         unsigned long flags;
784         void __iomem *pcie_index_offset;
785         void __iomem *pcie_data_offset;
786
787         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
788         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
789         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
790
791         /* write low 32 bits */
792         writel(reg_addr, pcie_index_offset);
793         readl(pcie_index_offset);
794         writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
795         readl(pcie_data_offset);
796         /* write high 32 bits */
797         writel(reg_addr + 4, pcie_index_offset);
798         readl(pcie_index_offset);
799         writel((u32)(reg_data >> 32), pcie_data_offset);
800         readl(pcie_data_offset);
801         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
802 }
803
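/*
 * Illustrative sketch (hypothetical offsets): ASIC code typically installs a
 * thin wrapper like this as adev->pcie_rreg, feeding its own PCIE index/data
 * register offsets into the generic indirect accessor above.  The offsets
 * used here are placeholders, not real hardware values.
 */
static u32 __maybe_unused amdgpu_example_pcie_rreg(struct amdgpu_device *adev,
                                                   u32 reg_addr)
{
        u32 pcie_index = 0x38; /* placeholder PCIE_INDEX dword offset */
        u32 pcie_data = 0x39;  /* placeholder PCIE_DATA dword offset */

        return amdgpu_device_indirect_rreg(adev, pcie_index, pcie_data, reg_addr);
}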
804 /**
805  * amdgpu_invalid_rreg - dummy reg read function
806  *
807  * @adev: amdgpu_device pointer
808  * @reg: offset of register
809  *
810  * Dummy register read function.  Used for register blocks
811  * that certain asics don't have (all asics).
812  * Returns the value in the register.
813  */
814 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
815 {
816         DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
817         BUG();
818         return 0;
819 }
820
821 /**
822  * amdgpu_invalid_wreg - dummy reg write function
823  *
824  * @adev: amdgpu_device pointer
825  * @reg: offset of register
826  * @v: value to write to the register
827  *
828  * Dummy register write function.  Used for register blocks
829  * that certain asics don't have (all asics).
830  */
831 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
832 {
833         DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
834                   reg, v);
835         BUG();
836 }
837
838 /**
839  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
840  *
841  * @adev: amdgpu_device pointer
842  * @reg: offset of register
843  *
844  * Dummy register read function.  Used for register blocks
845  * that certain asics don't have (all asics).
846  * Returns the value in the register.
847  */
848 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
849 {
850         DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
851         BUG();
852         return 0;
853 }
854
855 /**
856  * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
857  *
858  * @adev: amdgpu_device pointer
859  * @reg: offset of register
860  * @v: value to write to the register
861  *
862  * Dummy register write function.  Used for register blocks
863  * that certain asics don't have (all asics).
864  */
865 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
866 {
867         DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
868                   reg, v);
869         BUG();
870 }
871
872 /**
873  * amdgpu_block_invalid_rreg - dummy reg read function
874  *
875  * @adev: amdgpu_device pointer
876  * @block: offset of instance
877  * @reg: offset of register
878  *
879  * Dummy register read function.  Used for register blocks
880  * that certain asics don't have (all asics).
881  * Returns the value in the register.
882  */
883 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
884                                           uint32_t block, uint32_t reg)
885 {
886         DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
887                   reg, block);
888         BUG();
889         return 0;
890 }
891
892 /**
893  * amdgpu_block_invalid_wreg - dummy reg write function
894  *
895  * @adev: amdgpu_device pointer
896  * @block: offset of instance
897  * @reg: offset of register
898  * @v: value to write to the register
899  *
900  * Dummy register write function.  Used for register blocks
901  * that certain asics don't have (all asics).
902  */
903 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
904                                       uint32_t block,
905                                       uint32_t reg, uint32_t v)
906 {
907         DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
908                   reg, block, v);
909         BUG();
910 }
911
912 /**
913  * amdgpu_device_asic_init - Wrapper for atom asic_init
914  *
915  * @adev: amdgpu_device pointer
916  *
917  * Does any asic specific work and then calls atom asic init.
918  */
919 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
920 {
921         amdgpu_asic_pre_asic_init(adev);
922
923         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
924                 return amdgpu_atomfirmware_asic_init(adev, true);
925         else
926                 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
927 }
928
929 /**
930  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
931  *
932  * @adev: amdgpu_device pointer
933  *
934  * Allocates a scratch page of VRAM for use by various things in the
935  * driver.
936  */
937 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
938 {
939         return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
940                                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
941                                        &adev->vram_scratch.robj,
942                                        &adev->vram_scratch.gpu_addr,
943                                        (void **)&adev->vram_scratch.ptr);
944 }
945
946 /**
947  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
948  *
949  * @adev: amdgpu_device pointer
950  *
951  * Frees the VRAM scratch page.
952  */
953 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
954 {
955         amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
956 }
957
958 /**
959  * amdgpu_device_program_register_sequence - program an array of registers.
960  *
961  * @adev: amdgpu_device pointer
962  * @registers: pointer to the register array
963  * @array_size: size of the register array
964  *
965  * Programs an array of registers with AND and OR masks.
966  * This is a helper for setting golden registers.
967  */
968 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
969                                              const u32 *registers,
970                                              const u32 array_size)
971 {
972         u32 tmp, reg, and_mask, or_mask;
973         int i;
974
975         if (array_size % 3)
976                 return;
977
978         for (i = 0; i < array_size; i += 3) {
979                 reg = registers[i + 0];
980                 and_mask = registers[i + 1];
981                 or_mask = registers[i + 2];
982
983                 if (and_mask == 0xffffffff) {
984                         tmp = or_mask;
985                 } else {
986                         tmp = RREG32(reg);
987                         tmp &= ~and_mask;
988                         if (adev->family >= AMDGPU_FAMILY_AI)
989                                 tmp |= (or_mask & and_mask);
990                         else
991                                 tmp |= or_mask;
992                 }
993                 WREG32(reg, tmp);
994         }
995 }
996
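/*
 * Illustrative sketch (hypothetical register offsets and values): a golden
 * register list is a flat array of {reg, and_mask, or_mask} triples.  An
 * and_mask of 0xffffffff writes or_mask verbatim; anything else becomes a
 * read-modify-write as described above.
 */
static const u32 amdgpu_example_golden_settings[] __maybe_unused = {
        /* reg          and_mask        or_mask */
        0x000012a4,     0xffffffff,     0x00000001,     /* full overwrite */
        0x000012a8,     0x00ff0000,     0x00120000,     /* read-modify-write */
};

/*
 * It would be applied with something like:
 *   amdgpu_device_program_register_sequence(adev, amdgpu_example_golden_settings,
 *                                           ARRAY_SIZE(amdgpu_example_golden_settings));
 */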
997 /**
998  * amdgpu_device_pci_config_reset - reset the GPU
999  *
1000  * @adev: amdgpu_device pointer
1001  *
1002  * Resets the GPU using the pci config reset sequence.
1003  * Only applicable to asics prior to vega10.
1004  */
1005 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1006 {
1007         pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1008 }
1009
1010 /**
1011  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1012  *
1013  * @adev: amdgpu_device pointer
1014  *
1015  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1016  */
1017 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1018 {
1019         return pci_reset_function(adev->pdev);
1020 }
1021
1022 /*
1023  * GPU doorbell aperture helpers function.
1024  */
1025 /**
1026  * amdgpu_device_doorbell_init - Init doorbell driver information.
1027  *
1028  * @adev: amdgpu_device pointer
1029  *
1030  * Init doorbell driver information (CIK)
1031  * Returns 0 on success, error on failure.
1032  */
1033 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1034 {
1035
1036         /* No doorbell on SI hardware generation */
1037         if (adev->asic_type < CHIP_BONAIRE) {
1038                 adev->doorbell.base = 0;
1039                 adev->doorbell.size = 0;
1040                 adev->doorbell.num_doorbells = 0;
1041                 adev->doorbell.ptr = NULL;
1042                 return 0;
1043         }
1044
1045         if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1046                 return -EINVAL;
1047
1048         amdgpu_asic_init_doorbell_index(adev);
1049
1050         /* doorbell bar mapping */
1051         adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1052         adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1053
1054         if (adev->enable_mes) {
1055                 adev->doorbell.num_doorbells =
1056                         adev->doorbell.size / sizeof(u32);
1057         } else {
1058                 adev->doorbell.num_doorbells =
1059                         min_t(u32, adev->doorbell.size / sizeof(u32),
1060                               adev->doorbell_index.max_assignment+1);
1061                 if (adev->doorbell.num_doorbells == 0)
1062                         return -EINVAL;
1063
1064                 /* For Vega, reserve and map two pages on doorbell BAR since SDMA
1065                  * paging queue doorbells use the second page. The
1066                  * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1067                  * doorbells are in the first page. So with the paging queue enabled,
1068                  * num_doorbells is increased by one page (0x400 in dwords).
1069                  */
1070                 if (adev->asic_type >= CHIP_VEGA10)
1071                         adev->doorbell.num_doorbells += 0x400;
1072         }
1073
1074         adev->doorbell.ptr = ioremap(adev->doorbell.base,
1075                                      adev->doorbell.num_doorbells *
1076                                      sizeof(u32));
1077         if (adev->doorbell.ptr == NULL)
1078                 return -ENOMEM;
1079
1080         return 0;
1081 }
1082
1083 /**
1084  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1085  *
1086  * @adev: amdgpu_device pointer
1087  *
1088  * Tear down doorbell driver information (CIK)
1089  */
1090 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1091 {
1092         iounmap(adev->doorbell.ptr);
1093         adev->doorbell.ptr = NULL;
1094 }
1095
1096
1097
1098 /*
1099  * amdgpu_device_wb_*()
1100  * Writeback is the method by which the GPU updates special pages in memory
1101  * with the status of certain GPU events (fences, ring pointers, etc.).
1102  */
1103
1104 /**
1105  * amdgpu_device_wb_fini - Disable Writeback and free memory
1106  *
1107  * @adev: amdgpu_device pointer
1108  *
1109  * Disables Writeback and frees the Writeback memory (all asics).
1110  * Used at driver shutdown.
1111  */
1112 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1113 {
1114         if (adev->wb.wb_obj) {
1115                 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1116                                       &adev->wb.gpu_addr,
1117                                       (void **)&adev->wb.wb);
1118                 adev->wb.wb_obj = NULL;
1119         }
1120 }
1121
1122 /**
1123  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1124  *
1125  * @adev: amdgpu_device pointer
1126  *
1127  * Initializes writeback and allocates writeback memory (all asics).
1128  * Used at driver startup.
1129  * Returns 0 on success or a negative error code on failure.
1130  */
1131 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1132 {
1133         int r;
1134
1135         if (adev->wb.wb_obj == NULL) {
1136                 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1137                 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1138                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1139                                             &adev->wb.wb_obj, &adev->wb.gpu_addr,
1140                                             (void **)&adev->wb.wb);
1141                 if (r) {
1142                         dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1143                         return r;
1144                 }
1145
1146                 adev->wb.num_wb = AMDGPU_MAX_WB;
1147                 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1148
1149                 /* clear wb memory */
1150                 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1151         }
1152
1153         return 0;
1154 }
1155
1156 /**
1157  * amdgpu_device_wb_get - Allocate a wb entry
1158  *
1159  * @adev: amdgpu_device pointer
1160  * @wb: wb index
1161  *
1162  * Allocate a wb slot for use by the driver (all asics).
1163  * Returns 0 on success or -EINVAL on failure.
1164  */
1165 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1166 {
1167         unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1168
1169         if (offset < adev->wb.num_wb) {
1170                 __set_bit(offset, adev->wb.used);
1171                 *wb = offset << 3; /* convert to dw offset */
1172                 return 0;
1173         } else {
1174                 return -EINVAL;
1175         }
1176 }
1177
1178 /**
1179  * amdgpu_device_wb_free - Free a wb entry
1180  *
1181  * @adev: amdgpu_device pointer
1182  * @wb: wb index
1183  *
1184  * Free a wb slot allocated for use by the driver (all asics)
1185  */
1186 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1187 {
1188         wb >>= 3;
1189         if (wb < adev->wb.num_wb)
1190                 __clear_bit(wb, adev->wb.used);
1191 }
1192
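/*
 * Illustrative sketch (hypothetical helper): a typical writeback slot
 * life-cycle.  The index returned by amdgpu_device_wb_get() is in dwords, so
 * the CPU view of the slot is &adev->wb.wb[wb] and its GPU address is
 * adev->wb.gpu_addr + wb * 4.
 */
static int __maybe_unused amdgpu_example_wb_usage(struct amdgpu_device *adev)
{
        u32 wb;
        int r;

        r = amdgpu_device_wb_get(adev, &wb);
        if (r)
                return r;

        /* hardware would write an event status here; read it via the CPU map */
        dev_dbg(adev->dev, "wb slot %u currently holds 0x%08x\n", wb, adev->wb.wb[wb]);

        amdgpu_device_wb_free(adev, wb);
        return 0;
}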
1193 /**
1194  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1195  *
1196  * @adev: amdgpu_device pointer
1197  *
1198  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1199  * to fail, but if any of the BARs is not accessible after the resize we abort
1200  * driver loading by returning -ENODEV.
1201  */
1202 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1203 {
1204         int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1205         struct pci_bus *root;
1206         struct resource *res;
1207         unsigned i;
1208         u16 cmd;
1209         int r;
1210
1211         /* Bypass for VF */
1212         if (amdgpu_sriov_vf(adev))
1213                 return 0;
1214
1215         /* skip if the bios has already enabled large BAR */
1216         if (adev->gmc.real_vram_size &&
1217             (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1218                 return 0;
1219
1220         /* Check if the root BUS has 64bit memory resources */
1221         root = adev->pdev->bus;
1222         while (root->parent)
1223                 root = root->parent;
1224
1225         pci_bus_for_each_resource(root, res, i) {
1226                 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1227                     res->start > 0x100000000ull)
1228                         break;
1229         }
1230
1231         /* Trying to resize is pointless without a root hub window above 4GB */
1232         if (!res)
1233                 return 0;
1234
1235         /* Limit the BAR size to what is available */
1236         rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1237                         rbar_size);
1238
1239         /* Disable memory decoding while we change the BAR addresses and size */
1240         pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1241         pci_write_config_word(adev->pdev, PCI_COMMAND,
1242                               cmd & ~PCI_COMMAND_MEMORY);
1243
1244         /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1245         amdgpu_device_doorbell_fini(adev);
1246         if (adev->asic_type >= CHIP_BONAIRE)
1247                 pci_release_resource(adev->pdev, 2);
1248
1249         pci_release_resource(adev->pdev, 0);
1250
1251         r = pci_resize_resource(adev->pdev, 0, rbar_size);
1252         if (r == -ENOSPC)
1253                 DRM_INFO("Not enough PCI address space for a large BAR.");
1254         else if (r && r != -ENOTSUPP)
1255                 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1256
1257         pci_assign_unassigned_bus_resources(adev->pdev->bus);
1258
1259         /* When the doorbell or fb BAR isn't available we have no chance of
1260          * using the device.
1261          */
1262         r = amdgpu_device_doorbell_init(adev);
1263         if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1264                 return -ENODEV;
1265
1266         pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1267
1268         return 0;
1269 }
1270
1271 /*
1272  * GPU helpers function.
1273  */
1274 /**
1275  * amdgpu_device_need_post - check if the hw need post or not
1276  *
1277  * @adev: amdgpu_device pointer
1278  *
1279  * Check if the asic has been initialized (all asics) at driver startup,
1280  * or if a post is needed because a hw reset was performed.
1281  * Returns true if need or false if not.
1282  */
1283 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1284 {
1285         uint32_t reg;
1286
1287         if (amdgpu_sriov_vf(adev))
1288                 return false;
1289
1290         if (amdgpu_passthrough(adev)) {
1291                 /* For FIJI: In the whole-GPU pass-through virtualization case, after a VM
1292                  * reboot some old SMC firmware still needs the driver to do a vPost,
1293                  * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
1294                  * this flaw, so force vPost for SMC versions below 22.15.
1295                  */
1296                 if (adev->asic_type == CHIP_FIJI) {
1297                         int err;
1298                         uint32_t fw_ver;
1299                         err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1300                         /* force vPost if an error occurred */
1301                         if (err)
1302                                 return true;
1303
1304                         fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1305                         if (fw_ver < 0x00160e00)
1306                                 return true;
1307                 }
1308         }
1309
1310         /* Don't post if we need to reset whole hive on init */
1311         if (adev->gmc.xgmi.pending_reset)
1312                 return false;
1313
1314         if (adev->has_hw_reset) {
1315                 adev->has_hw_reset = false;
1316                 return true;
1317         }
1318
1319         /* bios scratch used on CIK+ */
1320         if (adev->asic_type >= CHIP_BONAIRE)
1321                 return amdgpu_atombios_scratch_need_asic_init(adev);
1322
1323         /* check MEM_SIZE for older asics */
1324         reg = amdgpu_asic_get_config_memsize(adev);
1325
1326         if ((reg != 0) && (reg != 0xffffffff))
1327                 return false;
1328
1329         return true;
1330 }
1331
1332 /**
1333  * amdgpu_device_should_use_aspm - check if the device should program ASPM
1334  *
1335  * @adev: amdgpu_device pointer
1336  *
1337  * Confirm whether the module parameter and pcie bridge agree that ASPM should
1338  * be set for this device.
1339  *
1340  * Returns true if it should be used or false if not.
1341  */
1342 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1343 {
1344         switch (amdgpu_aspm) {
1345         case -1:
1346                 break;
1347         case 0:
1348                 return false;
1349         case 1:
1350                 return true;
1351         default:
1352                 return false;
1353         }
1354         return pcie_aspm_enabled(adev->pdev);
1355 }
1356
1357 /* if we get transitioned to only one device, take VGA back */
1358 /**
1359  * amdgpu_device_vga_set_decode - enable/disable vga decode
1360  *
1361  * @pdev: PCI device pointer
1362  * @state: enable/disable vga decode
1363  *
1364  * Enable/disable vga decode (all asics).
1365  * Returns VGA resource flags.
1366  */
1367 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1368                 bool state)
1369 {
1370         struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1371         amdgpu_asic_set_vga_state(adev, state);
1372         if (state)
1373                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1374                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1375         else
1376                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1377 }
1378
1379 /**
1380  * amdgpu_device_check_block_size - validate the vm block size
1381  *
1382  * @adev: amdgpu_device pointer
1383  *
1384  * Validates the vm block size specified via module parameter.
1385  * The vm block size defines the number of bits in the page table versus the page directory;
1386  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1387  * page table and the remaining bits are in the page directory.
1388  */
1389 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1390 {
1391         /* defines number of bits in page table versus page directory,
1392          * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1393          * page table and the remaining bits are in the page directory */
1394         if (amdgpu_vm_block_size == -1)
1395                 return;
1396
1397         if (amdgpu_vm_block_size < 9) {
1398                 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1399                          amdgpu_vm_block_size);
1400                 amdgpu_vm_block_size = -1;
1401         }
1402 }
1403
1404 /**
1405  * amdgpu_device_check_vm_size - validate the vm size
1406  *
1407  * @adev: amdgpu_device pointer
1408  *
1409  * Validates the vm size in GB specified via module parameter.
1410  * The VM size is the size of the GPU virtual memory space in GB.
1411  */
1412 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1413 {
1414         /* no need to check the default value */
1415         if (amdgpu_vm_size == -1)
1416                 return;
1417
1418         if (amdgpu_vm_size < 1) {
1419                 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1420                          amdgpu_vm_size);
1421                 amdgpu_vm_size = -1;
1422         }
1423 }
1424
1425 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1426 {
1427         struct sysinfo si;
1428         bool is_os_64 = (sizeof(void *) == 8);
1429         uint64_t total_memory;
1430         uint64_t dram_size_seven_GB = 0x1B8000000;
1431         uint64_t dram_size_three_GB = 0xB8000000;
1432
1433         if (amdgpu_smu_memory_pool_size == 0)
1434                 return;
1435
1436         if (!is_os_64) {
1437                 DRM_WARN("Not 64-bit OS, feature not supported\n");
1438                 goto def_value;
1439         }
1440         si_meminfo(&si);
1441         total_memory = (uint64_t)si.totalram * si.mem_unit;
1442
1443         if ((amdgpu_smu_memory_pool_size == 1) ||
1444                 (amdgpu_smu_memory_pool_size == 2)) {
1445                 if (total_memory < dram_size_three_GB)
1446                         goto def_value1;
1447         } else if ((amdgpu_smu_memory_pool_size == 4) ||
1448                 (amdgpu_smu_memory_pool_size == 8)) {
1449                 if (total_memory < dram_size_seven_GB)
1450                         goto def_value1;
1451         } else {
1452                 DRM_WARN("Smu memory pool size not supported\n");
1453                 goto def_value;
1454         }
1455         adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1456
1457         return;
1458
1459 def_value1:
1460         DRM_WARN("Not enough system memory\n");
1461 def_value:
1462         adev->pm.smu_prv_buffer_size = 0;
1463 }
1464
1465 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1466 {
1467         if (!(adev->flags & AMD_IS_APU) ||
1468             adev->asic_type < CHIP_RAVEN)
1469                 return 0;
1470
1471         switch (adev->asic_type) {
1472         case CHIP_RAVEN:
1473                 if (adev->pdev->device == 0x15dd)
1474                         adev->apu_flags |= AMD_APU_IS_RAVEN;
1475                 if (adev->pdev->device == 0x15d8)
1476                         adev->apu_flags |= AMD_APU_IS_PICASSO;
1477                 break;
1478         case CHIP_RENOIR:
1479                 if ((adev->pdev->device == 0x1636) ||
1480                     (adev->pdev->device == 0x164c))
1481                         adev->apu_flags |= AMD_APU_IS_RENOIR;
1482                 else
1483                         adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1484                 break;
1485         case CHIP_VANGOGH:
1486                 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1487                 break;
1488         case CHIP_YELLOW_CARP:
1489                 break;
1490         case CHIP_CYAN_SKILLFISH:
1491                 if ((adev->pdev->device == 0x13FE) ||
1492                     (adev->pdev->device == 0x143F))
1493                         adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1494                 break;
1495         default:
1496                 break;
1497         }
1498
1499         return 0;
1500 }
1501
1502 /**
1503  * amdgpu_device_check_arguments - validate module params
1504  *
1505  * @adev: amdgpu_device pointer
1506  *
1507  * Validates certain module parameters and updates
1508  * the associated values used by the driver (all asics).
1509  */
1510 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1511 {
1512         if (amdgpu_sched_jobs < 4) {
1513                 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1514                          amdgpu_sched_jobs);
1515                 amdgpu_sched_jobs = 4;
1516         } else if (!is_power_of_2(amdgpu_sched_jobs)) {
1517                 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1518                          amdgpu_sched_jobs);
1519                 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1520         }
1521
1522         if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1523                 /* gart size must be greater than or equal to 32M */
1524                 dev_warn(adev->dev, "gart size (%d) too small\n",
1525                          amdgpu_gart_size);
1526                 amdgpu_gart_size = -1;
1527         }
1528
1529         if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1530                 /* gtt size must be greater than or equal to 32M */
1531                 dev_warn(adev->dev, "gtt size (%d) too small\n",
1532                                  amdgpu_gtt_size);
1533                 amdgpu_gtt_size = -1;
1534         }
1535
1536         /* valid range is between 4 and 9 inclusive */
1537         if (amdgpu_vm_fragment_size != -1 &&
1538             (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1539                 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1540                 amdgpu_vm_fragment_size = -1;
1541         }
1542
1543         if (amdgpu_sched_hw_submission < 2) {
1544                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1545                          amdgpu_sched_hw_submission);
1546                 amdgpu_sched_hw_submission = 2;
1547         } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1548                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1549                          amdgpu_sched_hw_submission);
1550                 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1551         }
1552
1553         if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1554                 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1555                 amdgpu_reset_method = -1;
1556         }
1557
1558         amdgpu_device_check_smu_prv_buffer_size(adev);
1559
1560         amdgpu_device_check_vm_size(adev);
1561
1562         amdgpu_device_check_block_size(adev);
1563
1564         adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1565
1566         return 0;
1567 }
1568
1569 /**
1570  * amdgpu_switcheroo_set_state - set switcheroo state
1571  *
1572  * @pdev: pci dev pointer
1573  * @state: vga_switcheroo state
1574  *
1575  * Callback for the switcheroo driver.  Suspends or resumes
1576  * the asics before or after it is powered up using ACPI methods.
1577  */
1578 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1579                                         enum vga_switcheroo_state state)
1580 {
1581         struct drm_device *dev = pci_get_drvdata(pdev);
1582         int r;
1583
1584         if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1585                 return;
1586
1587         if (state == VGA_SWITCHEROO_ON) {
1588                 pr_info("switched on\n");
1589                 /* don't suspend or resume card normally */
1590                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1591
1592                 pci_set_power_state(pdev, PCI_D0);
1593                 amdgpu_device_load_pci_state(pdev);
1594                 r = pci_enable_device(pdev);
1595                 if (r)
1596                         DRM_WARN("pci_enable_device failed (%d)\n", r);
1597                 amdgpu_device_resume(dev, true);
1598
1599                 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1600         } else {
1601                 pr_info("switched off\n");
1602                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1603                 amdgpu_device_suspend(dev, true);
1604                 amdgpu_device_cache_pci_state(pdev);
1605                 /* Shut down the device */
1606                 pci_disable_device(pdev);
1607                 pci_set_power_state(pdev, PCI_D3cold);
1608                 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1609         }
1610 }
1611
1612 /**
1613  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1614  *
1615  * @pdev: pci dev pointer
1616  *
1617  * Callback for the switcheroo driver.  Check if the switcheroo
1618  * state can be changed.
1619  * Returns true if the state can be changed, false if not.
1620  */
1621 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1622 {
1623         struct drm_device *dev = pci_get_drvdata(pdev);
1624
1625         /*
1626          * FIXME: open_count is protected by drm_global_mutex but that would lead to
1627          * locking inversion with the driver load path. And the access here is
1628          * completely racy anyway. So don't bother with locking for now.
1629          */
1630         return atomic_read(&dev->open_count) == 0;
1631 }
1632
1633 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1634         .set_gpu_state = amdgpu_switcheroo_set_state,
1635         .reprobe = NULL,
1636         .can_switch = amdgpu_switcheroo_can_switch,
1637 };
1638
1639 /**
1640  * amdgpu_device_ip_set_clockgating_state - set the CG state
1641  *
1642  * @dev: amdgpu_device pointer
1643  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1644  * @state: clockgating state (gate or ungate)
1645  *
1646  * Sets the requested clockgating state for all instances of
1647  * the hardware IP specified.
1648  * Returns the error code from the last instance.
1649  */
1650 int amdgpu_device_ip_set_clockgating_state(void *dev,
1651                                            enum amd_ip_block_type block_type,
1652                                            enum amd_clockgating_state state)
1653 {
1654         struct amdgpu_device *adev = dev;
1655         int i, r = 0;
1656
1657         for (i = 0; i < adev->num_ip_blocks; i++) {
1658                 if (!adev->ip_blocks[i].status.valid)
1659                         continue;
1660                 if (adev->ip_blocks[i].version->type != block_type)
1661                         continue;
1662                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1663                         continue;
1664                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1665                         (void *)adev, state);
1666                 if (r)
1667                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1668                                   adev->ip_blocks[i].version->funcs->name, r);
1669         }
1670         return r;
1671 }
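
/*
 * Illustrative usage sketch (not part of the original file): a power
 * management path that wants to force clockgating on for every GFX instance
 * could call, for example:
 *
 *   r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *                                              AMD_CG_STATE_GATE);
 *
 * Only the error code of the last matching instance is returned, so callers
 * that need per-instance status have to rely on the error logs above.
 */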
1672
1673 /**
1674  * amdgpu_device_ip_set_powergating_state - set the PG state
1675  *
1676  * @dev: amdgpu_device pointer
1677  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1678  * @state: powergating state (gate or ungate)
1679  *
1680  * Sets the requested powergating state for all instances of
1681  * the hardware IP specified.
1682  * Returns the error code from the last instance.
1683  */
1684 int amdgpu_device_ip_set_powergating_state(void *dev,
1685                                            enum amd_ip_block_type block_type,
1686                                            enum amd_powergating_state state)
1687 {
1688         struct amdgpu_device *adev = dev;
1689         int i, r = 0;
1690
1691         for (i = 0; i < adev->num_ip_blocks; i++) {
1692                 if (!adev->ip_blocks[i].status.valid)
1693                         continue;
1694                 if (adev->ip_blocks[i].version->type != block_type)
1695                         continue;
1696                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1697                         continue;
1698                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1699                         (void *)adev, state);
1700                 if (r)
1701                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1702                                   adev->ip_blocks[i].version->funcs->name, r);
1703         }
1704         return r;
1705 }
1706
1707 /**
1708  * amdgpu_device_ip_get_clockgating_state - get the CG state
1709  *
1710  * @adev: amdgpu_device pointer
1711  * @flags: clockgating feature flags
1712  *
1713  * Walks the list of IPs on the device and updates the clockgating
1714  * flags for each IP.
1715  * Updates @flags with the feature flags for each hardware IP where
1716  * clockgating is enabled.
1717  */
1718 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1719                                             u64 *flags)
1720 {
1721         int i;
1722
1723         for (i = 0; i < adev->num_ip_blocks; i++) {
1724                 if (!adev->ip_blocks[i].status.valid)
1725                         continue;
1726                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1727                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1728         }
1729 }
1730
1731 /**
1732  * amdgpu_device_ip_wait_for_idle - wait for idle
1733  *
1734  * @adev: amdgpu_device pointer
1735  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1736  *
1737  * Waits for the requested hardware IP to be idle.
1738  * Returns 0 for success or a negative error code on failure.
1739  */
1740 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1741                                    enum amd_ip_block_type block_type)
1742 {
1743         int i, r;
1744
1745         for (i = 0; i < adev->num_ip_blocks; i++) {
1746                 if (!adev->ip_blocks[i].status.valid)
1747                         continue;
1748                 if (adev->ip_blocks[i].version->type == block_type) {
1749                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1750                         if (r)
1751                                 return r;
1752                         break;
1753                 }
1754         }
1755         return 0;
1756
1757 }
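
/*
 * Illustrative usage sketch (not part of the original file): callers that
 * need a specific engine quiesced before touching its registers can do, e.g.
 *
 *   r = amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
 *   if (r)
 *           return r;
 *
 * A missing or invalid block simply returns 0, so this is not a presence check.
 */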
1758
1759 /**
1760  * amdgpu_device_ip_is_idle - is the hardware IP idle
1761  *
1762  * @adev: amdgpu_device pointer
1763  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1764  *
1765  * Check if the hardware IP is idle or not.
1766  * Returns true if the IP is idle, false if not.
1767  */
1768 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1769                               enum amd_ip_block_type block_type)
1770 {
1771         int i;
1772
1773         for (i = 0; i < adev->num_ip_blocks; i++) {
1774                 if (!adev->ip_blocks[i].status.valid)
1775                         continue;
1776                 if (adev->ip_blocks[i].version->type == block_type)
1777                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1778         }
1779         return true;
1780
1781 }
1782
1783 /**
1784  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1785  *
1786  * @adev: amdgpu_device pointer
1787  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1788  *
1789  * Returns a pointer to the hardware IP block structure
1790  * if it exists for the asic, otherwise NULL.
1791  */
1792 struct amdgpu_ip_block *
1793 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1794                               enum amd_ip_block_type type)
1795 {
1796         int i;
1797
1798         for (i = 0; i < adev->num_ip_blocks; i++)
1799                 if (adev->ip_blocks[i].version->type == type)
1800                         return &adev->ip_blocks[i];
1801
1802         return NULL;
1803 }
1804
1805 /**
1806  * amdgpu_device_ip_block_version_cmp
1807  *
1808  * @adev: amdgpu_device pointer
1809  * @type: enum amd_ip_block_type
1810  * @major: major version
1811  * @minor: minor version
1812  *
1813  * Returns 0 if the IP block version is equal to or greater than the
1814  * requested version, 1 if it is smaller or the ip_block doesn't exist.
1815  */
1816 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1817                                        enum amd_ip_block_type type,
1818                                        u32 major, u32 minor)
1819 {
1820         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1821
1822         if (ip_block && ((ip_block->version->major > major) ||
1823                         ((ip_block->version->major == major) &&
1824                         (ip_block->version->minor >= minor))))
1825                 return 0;
1826
1827         return 1;
1828 }
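
/*
 * Illustrative usage sketch (not part of the original file): the inverted
 * return value is meant to be used as a "too old or missing" test, e.g.
 *
 *   if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GMC, 9, 0))
 *           return -ENODEV;   (hypothetical caller: GMC older than v9.0 or absent)
 *
 * so 0 means the IP block is at least major.minor, 1 means it is not.
 */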
1829
1830 /**
1831  * amdgpu_device_ip_block_add
1832  *
1833  * @adev: amdgpu_device pointer
1834  * @ip_block_version: pointer to the IP to add
1835  *
1836  * Adds the IP block driver information to the collection of IPs
1837  * on the asic.
1838  */
1839 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1840                                const struct amdgpu_ip_block_version *ip_block_version)
1841 {
1842         if (!ip_block_version)
1843                 return -EINVAL;
1844
1845         switch (ip_block_version->type) {
1846         case AMD_IP_BLOCK_TYPE_VCN:
1847                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1848                         return 0;
1849                 break;
1850         case AMD_IP_BLOCK_TYPE_JPEG:
1851                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1852                         return 0;
1853                 break;
1854         default:
1855                 break;
1856         }
1857
1858         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1859                   ip_block_version->funcs->name);
1860
1861         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1862
1863         return 0;
1864 }
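
/*
 * Illustrative usage sketch (not part of the original file): the per-ASIC
 * set_ip_blocks routines build the list by calling this helper once per IP
 * in initialization order, roughly (block names as used in vi.c):
 *
 *   amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 *   amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
 *   ...
 *
 * Blocks for harvested VCN/JPEG engines are silently skipped by the switch above.
 */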
1865
1866 /**
1867  * amdgpu_device_enable_virtual_display - enable virtual display feature
1868  *
1869  * @adev: amdgpu_device pointer
1870  *
1871  * Enables the virtual display feature if the user has enabled it via
1872  * the module parameter virtual_display.  This feature provides virtual
1873  * display hardware on headless boards or in virtualized environments.
1874  * This function parses and validates the configuration string specified by
1875  * the user and configures the virtual display configuration (number of
1876  * virtual connectors, crtcs, etc.) specified.
1877  */
1878 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1879 {
1880         adev->enable_virtual_display = false;
1881
1882         if (amdgpu_virtual_display) {
1883                 const char *pci_address_name = pci_name(adev->pdev);
1884                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1885
1886                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1887                 pciaddstr_tmp = pciaddstr;
1888                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1889                         pciaddname = strsep(&pciaddname_tmp, ",");
1890                         if (!strcmp("all", pciaddname)
1891                             || !strcmp(pci_address_name, pciaddname)) {
1892                                 long num_crtc;
1893                                 int res = -1;
1894
1895                                 adev->enable_virtual_display = true;
1896
1897                                 if (pciaddname_tmp)
1898                                         res = kstrtol(pciaddname_tmp, 10,
1899                                                       &num_crtc);
1900
1901                                 if (!res) {
1902                                         if (num_crtc < 1)
1903                                                 num_crtc = 1;
1904                                         if (num_crtc > 6)
1905                                                 num_crtc = 6;
1906                                         adev->mode_info.num_crtc = num_crtc;
1907                                 } else {
1908                                         adev->mode_info.num_crtc = 1;
1909                                 }
1910                                 break;
1911                         }
1912                 }
1913
1914                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1915                          amdgpu_virtual_display, pci_address_name,
1916                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1917
1918                 kfree(pciaddstr);
1919         }
1920 }
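
/*
 * Illustrative note (not part of the original file): the virtual_display
 * module parameter parsed above is a semicolon-separated list of
 * "<pci address or all>,<num_crtc>" entries, typically set as, e.g.
 *
 *   modprobe amdgpu virtual_display=0000:03:00.0,2
 *   modprobe amdgpu virtual_display=all,4
 *
 * The crtc count is clamped to the 1..6 range; when it is omitted or cannot
 * be parsed, a single virtual crtc is assumed.
 */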
1921
1922 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
1923 {
1924         if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
1925                 adev->mode_info.num_crtc = 1;
1926                 adev->enable_virtual_display = true;
1927                 DRM_INFO("virtual_display:%d, num_crtc:%d\n",
1928                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1929         }
1930 }
1931
1932 /**
1933  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1934  *
1935  * @adev: amdgpu_device pointer
1936  *
1937  * Parses the asic configuration parameters specified in the gpu info
1938  * firmware and makes them available to the driver for use in configuring
1939  * the asic.
1940  * Returns 0 on success, -EINVAL on failure.
1941  */
1942 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1943 {
1944         const char *chip_name;
1945         char fw_name[40];
1946         int err;
1947         const struct gpu_info_firmware_header_v1_0 *hdr;
1948
1949         adev->firmware.gpu_info_fw = NULL;
1950
1951         if (adev->mman.discovery_bin) {
1952                 /*
1953                  * FIXME: The bounding box is still needed by Navi12, so
1954                  * temporarily read it from gpu_info firmware. Should be dropped
1955                  * when DAL no longer needs it.
1956                  */
1957                 if (adev->asic_type != CHIP_NAVI12)
1958                         return 0;
1959         }
1960
1961         switch (adev->asic_type) {
1962         default:
1963                 return 0;
1964         case CHIP_VEGA10:
1965                 chip_name = "vega10";
1966                 break;
1967         case CHIP_VEGA12:
1968                 chip_name = "vega12";
1969                 break;
1970         case CHIP_RAVEN:
1971                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1972                         chip_name = "raven2";
1973                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1974                         chip_name = "picasso";
1975                 else
1976                         chip_name = "raven";
1977                 break;
1978         case CHIP_ARCTURUS:
1979                 chip_name = "arcturus";
1980                 break;
1981         case CHIP_NAVI12:
1982                 chip_name = "navi12";
1983                 break;
1984         }
1985
1986         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1987         err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1988         if (err) {
1989                 dev_err(adev->dev,
1990                         "Failed to load gpu_info firmware \"%s\"\n",
1991                         fw_name);
1992                 goto out;
1993         }
1994         err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1995         if (err) {
1996                 dev_err(adev->dev,
1997                         "Failed to validate gpu_info firmware \"%s\"\n",
1998                         fw_name);
1999                 goto out;
2000         }
2001
2002         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2003         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2004
2005         switch (hdr->version_major) {
2006         case 1:
2007         {
2008                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2009                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2010                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2011
2012                 /*
2013                  * Should be dropped when DAL no longer needs it.
2014                  */
2015                 if (adev->asic_type == CHIP_NAVI12)
2016                         goto parse_soc_bounding_box;
2017
2018                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2019                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2020                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2021                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2022                 adev->gfx.config.max_texture_channel_caches =
2023                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
2024                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2025                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2026                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2027                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2028                 adev->gfx.config.double_offchip_lds_buf =
2029                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2030                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2031                 adev->gfx.cu_info.max_waves_per_simd =
2032                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2033                 adev->gfx.cu_info.max_scratch_slots_per_cu =
2034                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2035                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2036                 if (hdr->version_minor >= 1) {
2037                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2038                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2039                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2040                         adev->gfx.config.num_sc_per_sh =
2041                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2042                         adev->gfx.config.num_packer_per_sc =
2043                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2044                 }
2045
2046 parse_soc_bounding_box:
2047                 /*
2048                  * soc bounding box info is not integrated in the discovery table,
2049                  * so we always need to parse it from the gpu info firmware when needed.
2050                  */
2051                 if (hdr->version_minor == 2) {
2052                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2053                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2054                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2055                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2056                 }
2057                 break;
2058         }
2059         default:
2060                 dev_err(adev->dev,
2061                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2062                 err = -EINVAL;
2063                 goto out;
2064         }
2065 out:
2066         return err;
2067 }
2068
2069 /**
2070  * amdgpu_device_ip_early_init - run early init for hardware IPs
2071  *
2072  * @adev: amdgpu_device pointer
2073  *
2074  * Early initialization pass for hardware IPs.  The hardware IPs that make
2075  * up each asic are discovered and each IP's early_init callback is run.  This
2076  * is the first stage in initializing the asic.
2077  * Returns 0 on success, negative error code on failure.
2078  */
2079 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2080 {
2081         struct drm_device *dev = adev_to_drm(adev);
2082         struct pci_dev *parent;
2083         int i, r;
2084
2085         amdgpu_device_enable_virtual_display(adev);
2086
2087         if (amdgpu_sriov_vf(adev)) {
2088                 r = amdgpu_virt_request_full_gpu(adev, true);
2089                 if (r)
2090                         return r;
2091         }
2092
2093         switch (adev->asic_type) {
2094 #ifdef CONFIG_DRM_AMDGPU_SI
2095         case CHIP_VERDE:
2096         case CHIP_TAHITI:
2097         case CHIP_PITCAIRN:
2098         case CHIP_OLAND:
2099         case CHIP_HAINAN:
2100                 adev->family = AMDGPU_FAMILY_SI;
2101                 r = si_set_ip_blocks(adev);
2102                 if (r)
2103                         return r;
2104                 break;
2105 #endif
2106 #ifdef CONFIG_DRM_AMDGPU_CIK
2107         case CHIP_BONAIRE:
2108         case CHIP_HAWAII:
2109         case CHIP_KAVERI:
2110         case CHIP_KABINI:
2111         case CHIP_MULLINS:
2112                 if (adev->flags & AMD_IS_APU)
2113                         adev->family = AMDGPU_FAMILY_KV;
2114                 else
2115                         adev->family = AMDGPU_FAMILY_CI;
2116
2117                 r = cik_set_ip_blocks(adev);
2118                 if (r)
2119                         return r;
2120                 break;
2121 #endif
2122         case CHIP_TOPAZ:
2123         case CHIP_TONGA:
2124         case CHIP_FIJI:
2125         case CHIP_POLARIS10:
2126         case CHIP_POLARIS11:
2127         case CHIP_POLARIS12:
2128         case CHIP_VEGAM:
2129         case CHIP_CARRIZO:
2130         case CHIP_STONEY:
2131                 if (adev->flags & AMD_IS_APU)
2132                         adev->family = AMDGPU_FAMILY_CZ;
2133                 else
2134                         adev->family = AMDGPU_FAMILY_VI;
2135
2136                 r = vi_set_ip_blocks(adev);
2137                 if (r)
2138                         return r;
2139                 break;
2140         default:
2141                 r = amdgpu_discovery_set_ip_blocks(adev);
2142                 if (r)
2143                         return r;
2144                 break;
2145         }
2146
2147         if (amdgpu_has_atpx() &&
2148             (amdgpu_is_atpx_hybrid() ||
2149              amdgpu_has_atpx_dgpu_power_cntl()) &&
2150             ((adev->flags & AMD_IS_APU) == 0) &&
2151             !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2152                 adev->flags |= AMD_IS_PX;
2153
2154         if (!(adev->flags & AMD_IS_APU)) {
2155                 parent = pci_upstream_bridge(adev->pdev);
2156                 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2157         }
2158
2159         amdgpu_amdkfd_device_probe(adev);
2160
2161         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2162         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2163                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2164         if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2165                 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2166
2167         for (i = 0; i < adev->num_ip_blocks; i++) {
2168                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2169                         DRM_ERROR("disabled ip block: %d <%s>\n",
2170                                   i, adev->ip_blocks[i].version->funcs->name);
2171                         adev->ip_blocks[i].status.valid = false;
2172                 } else {
2173                         if (adev->ip_blocks[i].version->funcs->early_init) {
2174                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2175                                 if (r == -ENOENT) {
2176                                         adev->ip_blocks[i].status.valid = false;
2177                                 } else if (r) {
2178                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2179                                                   adev->ip_blocks[i].version->funcs->name, r);
2180                                         return r;
2181                                 } else {
2182                                         adev->ip_blocks[i].status.valid = true;
2183                                 }
2184                         } else {
2185                                 adev->ip_blocks[i].status.valid = true;
2186                         }
2187                 }
2188                 /* get the vbios after the asic_funcs are set up */
2189                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2190                         r = amdgpu_device_parse_gpu_info_fw(adev);
2191                         if (r)
2192                                 return r;
2193
2194                         /* Read BIOS */
2195                         if (!amdgpu_get_bios(adev))
2196                                 return -EINVAL;
2197
2198                         r = amdgpu_atombios_init(adev);
2199                         if (r) {
2200                                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2201                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2202                                 return r;
2203                         }
2204
2205                         /* get pf2vf msg info at its earliest time */
2206                         if (amdgpu_sriov_vf(adev))
2207                                 amdgpu_virt_init_data_exchange(adev);
2208
2209                 }
2210         }
2211
2212         adev->cg_flags &= amdgpu_cg_mask;
2213         adev->pg_flags &= amdgpu_pg_mask;
2214
2215         return 0;
2216 }
2217
2218 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2219 {
2220         int i, r;
2221
2222         for (i = 0; i < adev->num_ip_blocks; i++) {
2223                 if (!adev->ip_blocks[i].status.sw)
2224                         continue;
2225                 if (adev->ip_blocks[i].status.hw)
2226                         continue;
2227                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2228                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2229                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2230                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2231                         if (r) {
2232                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2233                                           adev->ip_blocks[i].version->funcs->name, r);
2234                                 return r;
2235                         }
2236                         adev->ip_blocks[i].status.hw = true;
2237                 }
2238         }
2239
2240         return 0;
2241 }
2242
2243 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2244 {
2245         int i, r;
2246
2247         for (i = 0; i < adev->num_ip_blocks; i++) {
2248                 if (!adev->ip_blocks[i].status.sw)
2249                         continue;
2250                 if (adev->ip_blocks[i].status.hw)
2251                         continue;
2252                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2253                 if (r) {
2254                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2255                                   adev->ip_blocks[i].version->funcs->name, r);
2256                         return r;
2257                 }
2258                 adev->ip_blocks[i].status.hw = true;
2259         }
2260
2261         return 0;
2262 }
2263
2264 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2265 {
2266         int r = 0;
2267         int i;
2268         uint32_t smu_version;
2269
2270         if (adev->asic_type >= CHIP_VEGA10) {
2271                 for (i = 0; i < adev->num_ip_blocks; i++) {
2272                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2273                                 continue;
2274
2275                         if (!adev->ip_blocks[i].status.sw)
2276                                 continue;
2277
2278                         /* no need to do the fw loading again if already done */
2279                         if (adev->ip_blocks[i].status.hw)
2280                                 break;
2281
2282                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2283                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2284                                 if (r) {
2285                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2286                                                           adev->ip_blocks[i].version->funcs->name, r);
2287                                         return r;
2288                                 }
2289                         } else {
2290                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2291                                 if (r) {
2292                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2293                                                           adev->ip_blocks[i].version->funcs->name, r);
2294                                         return r;
2295                                 }
2296                         }
2297
2298                         adev->ip_blocks[i].status.hw = true;
2299                         break;
2300                 }
2301         }
2302
2303         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2304                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2305
2306         return r;
2307 }
2308
2309 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2310 {
2311         long timeout;
2312         int r, i;
2313
2314         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2315                 struct amdgpu_ring *ring = adev->rings[i];
2316
2317                 /* No need to set up the GPU scheduler for rings that don't need it */
2318                 if (!ring || ring->no_scheduler)
2319                         continue;
2320
2321                 switch (ring->funcs->type) {
2322                 case AMDGPU_RING_TYPE_GFX:
2323                         timeout = adev->gfx_timeout;
2324                         break;
2325                 case AMDGPU_RING_TYPE_COMPUTE:
2326                         timeout = adev->compute_timeout;
2327                         break;
2328                 case AMDGPU_RING_TYPE_SDMA:
2329                         timeout = adev->sdma_timeout;
2330                         break;
2331                 default:
2332                         timeout = adev->video_timeout;
2333                         break;
2334                 }
2335
2336                 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2337                                    ring->num_hw_submission, amdgpu_job_hang_limit,
2338                                    timeout, adev->reset_domain->wq,
2339                                    ring->sched_score, ring->name,
2340                                    adev->dev);
2341                 if (r) {
2342                         DRM_ERROR("Failed to create scheduler on ring %s.\n",
2343                                   ring->name);
2344                         return r;
2345                 }
2346         }
2347
2348         return 0;
2349 }
2350
2351
2352 /**
2353  * amdgpu_device_ip_init - run init for hardware IPs
2354  *
2355  * @adev: amdgpu_device pointer
2356  *
2357  * Main initialization pass for hardware IPs.  The list of all the hardware
2358  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2359  * are run.  sw_init initializes the software state associated with each IP
2360  * and hw_init initializes the hardware associated with each IP.
2361  * Returns 0 on success, negative error code on failure.
2362  */
2363 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2364 {
2365         int i, r;
2366
2367         r = amdgpu_ras_init(adev);
2368         if (r)
2369                 return r;
2370
2371         for (i = 0; i < adev->num_ip_blocks; i++) {
2372                 if (!adev->ip_blocks[i].status.valid)
2373                         continue;
2374                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2375                 if (r) {
2376                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2377                                   adev->ip_blocks[i].version->funcs->name, r);
2378                         goto init_failed;
2379                 }
2380                 adev->ip_blocks[i].status.sw = true;
2381
2382                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2383                         /* need to do common hw init early so everything is set up for gmc */
2384                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2385                         if (r) {
2386                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2387                                 goto init_failed;
2388                         }
2389                         adev->ip_blocks[i].status.hw = true;
2390                 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2391                         /* need to do gmc hw init early so we can allocate gpu mem */
2392                         /* Try to reserve bad pages early */
2393                         if (amdgpu_sriov_vf(adev))
2394                                 amdgpu_virt_exchange_data(adev);
2395
2396                         r = amdgpu_device_vram_scratch_init(adev);
2397                         if (r) {
2398                                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2399                                 goto init_failed;
2400                         }
2401                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2402                         if (r) {
2403                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2404                                 goto init_failed;
2405                         }
2406                         r = amdgpu_device_wb_init(adev);
2407                         if (r) {
2408                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2409                                 goto init_failed;
2410                         }
2411                         adev->ip_blocks[i].status.hw = true;
2412
2413                         /* right after GMC hw init, we create CSA */
2414                         if (amdgpu_mcbp) {
2415                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2416                                                                 AMDGPU_GEM_DOMAIN_VRAM,
2417                                                                 AMDGPU_CSA_SIZE);
2418                                 if (r) {
2419                                         DRM_ERROR("allocate CSA failed %d\n", r);
2420                                         goto init_failed;
2421                                 }
2422                         }
2423                 }
2424         }
2425
2426         if (amdgpu_sriov_vf(adev))
2427                 amdgpu_virt_init_data_exchange(adev);
2428
2429         r = amdgpu_ib_pool_init(adev);
2430         if (r) {
2431                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2432                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2433                 goto init_failed;
2434         }
2435
2436         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2437         if (r)
2438                 goto init_failed;
2439
2440         r = amdgpu_device_ip_hw_init_phase1(adev);
2441         if (r)
2442                 goto init_failed;
2443
2444         r = amdgpu_device_fw_loading(adev);
2445         if (r)
2446                 goto init_failed;
2447
2448         r = amdgpu_device_ip_hw_init_phase2(adev);
2449         if (r)
2450                 goto init_failed;
2451
2452         /*
2453          * Retired pages will be loaded from eeprom and reserved here.
2454          * This should be called after amdgpu_device_ip_hw_init_phase2, since
2455          * for some ASICs the RAS EEPROM code relies on the SMU being fully
2456          * functional for I2C communication, which is only true at this point.
2457          *
2458          * amdgpu_ras_recovery_init may fail, but the upper layers only care
2459          * about failures caused by a bad gpu situation and stop the amdgpu
2460          * init process accordingly. For other failure cases it still releases
2461          * all the resources and prints an error message rather than returning
2462          * a negative value to the upper level.
2463          *
2464          * Note: theoretically, this should be called before all vram allocations
2465          * to protect retired pages from being abused.
2466          */
2467         r = amdgpu_ras_recovery_init(adev);
2468         if (r)
2469                 goto init_failed;
2470
2471         /*
2472          * In case of XGMI, grab an extra reference to the reset domain for this device
2473          */
2474         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2475                 if (amdgpu_xgmi_add_device(adev) == 0) {
2476                         if (!amdgpu_sriov_vf(adev)) {
2477                                 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2478
2479                                 if (WARN_ON(!hive)) {
2480                                         r = -ENOENT;
2481                                         goto init_failed;
2482                                 }
2483
2484                                 if (!hive->reset_domain ||
2485                                     !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2486                                         r = -ENOENT;
2487                                         amdgpu_put_xgmi_hive(hive);
2488                                         goto init_failed;
2489                                 }
2490
2491                                 /* Drop the early temporary reset domain we created for device */
2492                                 amdgpu_reset_put_reset_domain(adev->reset_domain);
2493                                 adev->reset_domain = hive->reset_domain;
2494                                 amdgpu_put_xgmi_hive(hive);
2495                         }
2496                 }
2497         }
2498
2499         r = amdgpu_device_init_schedulers(adev);
2500         if (r)
2501                 goto init_failed;
2502
2503         /* Don't init kfd if the whole hive needs to be reset during init */
2504         if (!adev->gmc.xgmi.pending_reset)
2505                 amdgpu_amdkfd_device_init(adev);
2506
2507         amdgpu_fru_get_product_info(adev);
2508
2509 init_failed:
2510         if (amdgpu_sriov_vf(adev))
2511                 amdgpu_virt_release_full_gpu(adev, true);
2512
2513         return r;
2514 }
2515
2516 /**
2517  * amdgpu_device_fill_reset_magic - snapshot the reset magic from the gart pointer
2518  *
2519  * @adev: amdgpu_device pointer
2520  *
2521  * Saves a snapshot of the data at the gart pointer in VRAM.  The driver calls
2522  * this function before a GPU reset.  If the value is retained after a
2523  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2524  */
2525 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2526 {
2527         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2528 }
2529
2530 /**
2531  * amdgpu_device_check_vram_lost - check if vram is valid
2532  *
2533  * @adev: amdgpu_device pointer
2534  *
2535  * Compares the data at the gart pointer in VRAM against the saved reset magic.
2536  * The driver calls this after a GPU reset to see if the contents of
2537  * VRAM are lost or not.
2538  * Returns true if vram is lost, false if not.
2539  */
2540 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2541 {
2542         if (memcmp(adev->gart.ptr, adev->reset_magic,
2543                         AMDGPU_RESET_MAGIC_NUM))
2544                 return true;
2545
2546         if (!amdgpu_in_reset(adev))
2547                 return false;
2548
2549         /*
2550          * For all ASICs with baco/mode1 reset, the VRAM is
2551          * always assumed to be lost.
2552          */
2553         switch (amdgpu_asic_reset_method(adev)) {
2554         case AMD_RESET_METHOD_BACO:
2555         case AMD_RESET_METHOD_MODE1:
2556                 return true;
2557         default:
2558                 return false;
2559         }
2560 }
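
/*
 * Illustrative note (not part of the original file): the two helpers above
 * work as a pair around a GPU reset, conceptually:
 *
 *   amdgpu_device_fill_reset_magic(adev);            snapshot before the reset
 *   ... ASIC reset happens ...
 *   vram_lost = amdgpu_device_check_vram_lost(adev); compare after the reset
 *
 * If the saved bytes at the gart pointer no longer match, or the reset method
 * (BACO/mode1) is known to clear VRAM, the buffers in VRAM must be restored.
 */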
2561
2562 /**
2563  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2564  *
2565  * @adev: amdgpu_device pointer
2566  * @state: clockgating state (gate or ungate)
2567  *
2568  * The list of all the hardware IPs that make up the asic is walked and the
2569  * set_clockgating_state callbacks are run.
2570  * The late initialization pass enables clockgating for hardware IPs, while
2571  * the fini or suspend passes disable it.
2572  * Returns 0 on success, negative error code on failure.
2573  */
2574
2575 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2576                                enum amd_clockgating_state state)
2577 {
2578         int i, j, r;
2579
2580         if (amdgpu_emu_mode == 1)
2581                 return 0;
2582
2583         for (j = 0; j < adev->num_ip_blocks; j++) {
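                /*
                 * Gate in IP init order (0..N-1); ungate in reverse (N-1..0).
                 * E.g. with three blocks, GATE visits 0,1,2 while UNGATE
                 * visits 2,1,0, mirroring the init/fini ordering.
                 */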
2584                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2585                 if (!adev->ip_blocks[i].status.late_initialized)
2586                         continue;
2587                 /* skip CG for GFX on S0ix */
2588                 if (adev->in_s0ix &&
2589                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2590                         continue;
2591                 /* skip CG for VCE/UVD, it's handled specially */
2592                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2593                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2594                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2595                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2596                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2597                         /* enable clockgating to save power */
2598                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2599                                                                                      state);
2600                         if (r) {
2601                                 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2602                                           adev->ip_blocks[i].version->funcs->name, r);
2603                                 return r;
2604                         }
2605                 }
2606         }
2607
2608         return 0;
2609 }
2610
2611 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2612                                enum amd_powergating_state state)
2613 {
2614         int i, j, r;
2615
2616         if (amdgpu_emu_mode == 1)
2617                 return 0;
2618
2619         for (j = 0; j < adev->num_ip_blocks; j++) {
2620                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2621                 if (!adev->ip_blocks[i].status.late_initialized)
2622                         continue;
2623                 /* skip PG for GFX on S0ix */
2624                 if (adev->in_s0ix &&
2625                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2626                         continue;
2627                 /* skip PG for VCE/UVD, it's handled specially */
2628                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2629                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2630                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2631                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2632                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2633                         /* enable powergating to save power */
2634                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2635                                                                                         state);
2636                         if (r) {
2637                                 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2638                                           adev->ip_blocks[i].version->funcs->name, r);
2639                                 return r;
2640                         }
2641                 }
2642         }
2643         return 0;
2644 }
2645
2646 static int amdgpu_device_enable_mgpu_fan_boost(void)
2647 {
2648         struct amdgpu_gpu_instance *gpu_ins;
2649         struct amdgpu_device *adev;
2650         int i, ret = 0;
2651
2652         mutex_lock(&mgpu_info.mutex);
2653
2654         /*
2655          * MGPU fan boost feature should be enabled
2656          * only when there are two or more dGPUs in
2657          * the system
2658          */
2659         if (mgpu_info.num_dgpu < 2)
2660                 goto out;
2661
2662         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2663                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2664                 adev = gpu_ins->adev;
2665                 if (!(adev->flags & AMD_IS_APU) &&
2666                     !gpu_ins->mgpu_fan_enabled) {
2667                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2668                         if (ret)
2669                                 break;
2670
2671                         gpu_ins->mgpu_fan_enabled = 1;
2672                 }
2673         }
2674
2675 out:
2676         mutex_unlock(&mgpu_info.mutex);
2677
2678         return ret;
2679 }
2680
2681 /**
2682  * amdgpu_device_ip_late_init - run late init for hardware IPs
2683  *
2684  * @adev: amdgpu_device pointer
2685  *
2686  * Late initialization pass for hardware IPs.  The list of all the hardware
2687  * IPs that make up the asic is walked and the late_init callbacks are run.
2688  * late_init covers any special initialization that an IP requires
2689  * after all of the IPs have been initialized or something that needs to happen
2690  * late in the init process.
2691  * Returns 0 on success, negative error code on failure.
2692  */
2693 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2694 {
2695         struct amdgpu_gpu_instance *gpu_instance;
2696         int i = 0, r;
2697
2698         for (i = 0; i < adev->num_ip_blocks; i++) {
2699                 if (!adev->ip_blocks[i].status.hw)
2700                         continue;
2701                 if (adev->ip_blocks[i].version->funcs->late_init) {
2702                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2703                         if (r) {
2704                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2705                                           adev->ip_blocks[i].version->funcs->name, r);
2706                                 return r;
2707                         }
2708                 }
2709                 adev->ip_blocks[i].status.late_initialized = true;
2710         }
2711
2712         r = amdgpu_ras_late_init(adev);
2713         if (r) {
2714                 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2715                 return r;
2716         }
2717
2718         amdgpu_ras_set_error_query_ready(adev, true);
2719
2720         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2721         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2722
2723         amdgpu_device_fill_reset_magic(adev);
2724
2725         r = amdgpu_device_enable_mgpu_fan_boost();
2726         if (r)
2727                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2728
2729         /* For passthrough configurations on arcturus and aldebaran, enable special handling of SBR */
2730         if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2731                                adev->asic_type == CHIP_ALDEBARAN))
2732                 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2733
2734         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2735                 mutex_lock(&mgpu_info.mutex);
2736
2737                 /*
2738                  * Reset the device p-state to low, as it was booted with high.
2739                  *
2740                  * This should be performed only after all devices from the same
2741                  * hive get initialized.
2742                  *
2743                  * However, the number of devices in the hive is not known in advance,
2744                  * as it is counted one by one as the devices are initialized.
2745                  *
2746                  * So, we wait for all XGMI interlinked devices to be initialized.
2747                  * This may bring some delays as those devices may come from
2748                  * different hives. But that should be OK.
2749                  */
2750                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2751                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2752                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2753                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2754                                         continue;
2755
2756                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2757                                                 AMDGPU_XGMI_PSTATE_MIN);
2758                                 if (r) {
2759                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2760                                         break;
2761                                 }
2762                         }
2763                 }
2764
2765                 mutex_unlock(&mgpu_info.mutex);
2766         }
2767
2768         return 0;
2769 }
2770
2771 /**
2772  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2773  *
2774  * @adev: amdgpu_device pointer
2775  *
2776  * For ASICs that need to disable the SMC first
2777  */
2778 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2779 {
2780         int i, r;
2781
2782         if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2783                 return;
2784
2785         for (i = 0; i < adev->num_ip_blocks; i++) {
2786                 if (!adev->ip_blocks[i].status.hw)
2787                         continue;
2788                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2789                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2790                         /* XXX handle errors */
2791                         if (r) {
2792                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2793                                           adev->ip_blocks[i].version->funcs->name, r);
2794                         }
2795                         adev->ip_blocks[i].status.hw = false;
2796                         break;
2797                 }
2798         }
2799 }
2800
2801 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2802 {
2803         int i, r;
2804
2805         for (i = 0; i < adev->num_ip_blocks; i++) {
2806                 if (!adev->ip_blocks[i].version->funcs->early_fini)
2807                         continue;
2808
2809                 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2810                 if (r) {
2811                         DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2812                                   adev->ip_blocks[i].version->funcs->name, r);
2813                 }
2814         }
2815
2816         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2817         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2818
2819         amdgpu_amdkfd_suspend(adev, false);
2820
2821         /* Workaround for ASICs that need to disable the SMC first */
2822         amdgpu_device_smu_fini_early(adev);
2823
2824         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2825                 if (!adev->ip_blocks[i].status.hw)
2826                         continue;
2827
2828                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2829                 /* XXX handle errors */
2830                 if (r) {
2831                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2832                                   adev->ip_blocks[i].version->funcs->name, r);
2833                 }
2834
2835                 adev->ip_blocks[i].status.hw = false;
2836         }
2837
2838         if (amdgpu_sriov_vf(adev)) {
2839                 if (amdgpu_virt_release_full_gpu(adev, false))
2840                         DRM_ERROR("failed to release exclusive mode on fini\n");
2841         }
2842
2843         return 0;
2844 }
2845
2846 /**
2847  * amdgpu_device_ip_fini - run fini for hardware IPs
2848  *
2849  * @adev: amdgpu_device pointer
2850  *
2851  * Main teardown pass for hardware IPs.  The list of all the hardware
2852  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2853  * are run.  hw_fini tears down the hardware associated with each IP
2854  * and sw_fini tears down any software state associated with each IP.
2855  * Returns 0 on success, negative error code on failure.
2856  */
2857 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2858 {
2859         int i, r;
2860
2861         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2862                 amdgpu_virt_release_ras_err_handler_data(adev);
2863
2864         if (adev->gmc.xgmi.num_physical_nodes > 1)
2865                 amdgpu_xgmi_remove_device(adev);
2866
2867         amdgpu_amdkfd_device_fini_sw(adev);
2868
2869         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2870                 if (!adev->ip_blocks[i].status.sw)
2871                         continue;
2872
2873                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2874                         amdgpu_ucode_free_bo(adev);
2875                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2876                         amdgpu_device_wb_fini(adev);
2877                         amdgpu_device_vram_scratch_fini(adev);
2878                         amdgpu_ib_pool_fini(adev);
2879                 }
2880
2881                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2882                 /* XXX handle errors */
2883                 if (r) {
2884                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2885                                   adev->ip_blocks[i].version->funcs->name, r);
2886                 }
2887                 adev->ip_blocks[i].status.sw = false;
2888                 adev->ip_blocks[i].status.valid = false;
2889         }
2890
2891         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2892                 if (!adev->ip_blocks[i].status.late_initialized)
2893                         continue;
2894                 if (adev->ip_blocks[i].version->funcs->late_fini)
2895                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2896                 adev->ip_blocks[i].status.late_initialized = false;
2897         }
2898
2899         amdgpu_ras_fini(adev);
2900
2901         return 0;
2902 }
2903
2904 /**
2905  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2906  *
2907  * @work: work_struct.
2908  */
2909 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2910 {
2911         struct amdgpu_device *adev =
2912                 container_of(work, struct amdgpu_device, delayed_init_work.work);
2913         int r;
2914
2915         r = amdgpu_ib_ring_tests(adev);
2916         if (r)
2917                 DRM_ERROR("ib ring test failed (%d).\n", r);
2918 }
2919
2920 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2921 {
2922         struct amdgpu_device *adev =
2923                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2924
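        /*
         * Note: this delayed work is expected to be queued only after
         * gfx_off_req_count has dropped back to zero, so GFXOFF should not
         * already be enabled and no disable requests should be pending when
         * it runs; the WARN_ON_ONCE() checks below assert exactly that.
         */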
2925         WARN_ON_ONCE(adev->gfx.gfx_off_state);
2926         WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2927
2928         if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2929                 adev->gfx.gfx_off_state = true;
2930 }
2931
2932 /**
2933  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2934  *
2935  * @adev: amdgpu_device pointer
2936  *
2937  * First suspend pass for hardware IPs.  Clockgating and powergating are
2938  * ungated and the suspend callbacks are run for the display (DCE) IP blocks
2939  * only; the remaining blocks are suspended in phase 2.  suspend puts the
2940  * hardware and software state in each IP into a state suitable for suspend.
2941  * Returns 0 on success, negative error code on failure.
2942  */
2943 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2944 {
2945         int i, r;
2946
2947         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2948         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2949
2950         /*
2951          * Per the PMFW team's suggestion, the driver needs to disable the
2952          * gfxoff and df cstate features for gpu reset (e.g. Mode1Reset)
2953          * scenarios. Add the missing df cstate disablement here.
2954          */
2955         if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
2956                 dev_warn(adev->dev, "Failed to disallow df cstate");
2957
2958         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2959                 if (!adev->ip_blocks[i].status.valid)
2960                         continue;
2961
2962                 /* displays are handled separately */
2963                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2964                         continue;
2965
2967                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2968                 /* XXX handle errors */
2969                 if (r) {
2970                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2971                                   adev->ip_blocks[i].version->funcs->name, r);
2972                         return r;
2973                 }
2974
2975                 adev->ip_blocks[i].status.hw = false;
2976         }
2977
2978         return 0;
2979 }
2980
2981 /**
2982  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2983  *
2984  * @adev: amdgpu_device pointer
2985  *
2986  * Second suspend pass for hardware IPs.  The list of all the hardware
2987  * IPs that make up the asic is walked and the suspend callbacks are run
2988  * for every block except the displays (handled in phase 1).  suspend puts
2989  * the hardware and software state in each IP into a state suitable for suspend.
2990  * Returns 0 on success, negative error code on failure.
2991  */
2992 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2993 {
2994         int i, r;
2995
2996         if (adev->in_s0ix)
2997                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2998
2999         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3000                 if (!adev->ip_blocks[i].status.valid)
3001                         continue;
3002                 /* displays are handled in phase1 */
3003                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3004                         continue;
3005                 /* PSP lost connection when err_event_athub occurs */
3006                 if (amdgpu_ras_intr_triggered() &&
3007                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3008                         adev->ip_blocks[i].status.hw = false;
3009                         continue;
3010                 }
3011
3012                 /* skip unnecessary suspend if we have not initialized them yet */
3013                 if (adev->gmc.xgmi.pending_reset &&
3014                     !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3015                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3016                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3017                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3018                         adev->ip_blocks[i].status.hw = false;
3019                         continue;
3020                 }
3021
3022                 /* skip suspend of gfx/mes and psp for S0ix
3023                  * gfx is in gfxoff state, so on resume it will exit gfxoff just
3024                  * like at runtime. PSP is also part of the always on hardware
3025                  * so no need to suspend it.
3026                  */
3027                 if (adev->in_s0ix &&
3028                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3029                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3030                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3031                         continue;
3032
3034                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3035                 /* XXX handle errors */
3036                 if (r) {
3037                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
3038                                   adev->ip_blocks[i].version->funcs->name, r);
3039                 }
3040                 adev->ip_blocks[i].status.hw = false;
3041                 /* handle putting the SMC in the appropriate state */
3042                 if (!amdgpu_sriov_vf(adev)) {
3043                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3044                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3045                                 if (r) {
3046                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3047                                                         adev->mp1_state, r);
3048                                         return r;
3049                                 }
3050                         }
3051                 }
3052         }
3053
3054         return 0;
3055 }
3056
3057 /**
3058  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3059  *
3060  * @adev: amdgpu_device pointer
3061  *
3062  * Main suspend function for hardware IPs.  The list of all the hardware
3063  * IPs that make up the asic is walked, clockgating is disabled and the
3064  * suspend callbacks are run.  suspend puts the hardware and software state
3065  * in each IP into a state suitable for suspend.
3066  * Returns 0 on success, negative error code on failure.
3067  */
3068 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3069 {
3070         int r;
3071
3072         if (amdgpu_sriov_vf(adev)) {
3073                 amdgpu_virt_fini_data_exchange(adev);
3074                 amdgpu_virt_request_full_gpu(adev, false);
3075         }
3076
3077         r = amdgpu_device_ip_suspend_phase1(adev);
3078         if (r)
3079                 return r;
3080         r = amdgpu_device_ip_suspend_phase2(adev);
3081
3082         if (amdgpu_sriov_vf(adev))
3083                 amdgpu_virt_release_full_gpu(adev, false);
3084
3085         return r;
3086 }
3087
3088 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3089 {
3090         int i, r;
3091
3092         static enum amd_ip_block_type ip_order[] = {
3093                 AMD_IP_BLOCK_TYPE_COMMON,
3094                 AMD_IP_BLOCK_TYPE_GMC,
3095                 AMD_IP_BLOCK_TYPE_PSP,
3096                 AMD_IP_BLOCK_TYPE_IH,
3097         };
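        /*
         * Only these base blocks are brought back up in this early pass; the
         * remaining blocks (SMC, display, GFX, SDMA and the media engines)
         * are re-initialized by amdgpu_device_ip_reinit_late_sriov() below.
         */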
3098
3099         for (i = 0; i < adev->num_ip_blocks; i++) {
3100                 int j;
3101                 struct amdgpu_ip_block *block;
3102
3103                 block = &adev->ip_blocks[i];
3104                 block->status.hw = false;
3105
3106                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3107
3108                         if (block->version->type != ip_order[j] ||
3109                                 !block->status.valid)
3110                                 continue;
3111
3112                         r = block->version->funcs->hw_init(adev);
3113                 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3114                         if (r)
3115                                 return r;
3116                         block->status.hw = true;
3117                 }
3118         }
3119
3120         return 0;
3121 }
3122
3123 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3124 {
3125         int i, r;
3126
3127         static enum amd_ip_block_type ip_order[] = {
3128                 AMD_IP_BLOCK_TYPE_SMC,
3129                 AMD_IP_BLOCK_TYPE_DCE,
3130                 AMD_IP_BLOCK_TYPE_GFX,
3131                 AMD_IP_BLOCK_TYPE_SDMA,
3132                 AMD_IP_BLOCK_TYPE_UVD,
3133                 AMD_IP_BLOCK_TYPE_VCE,
3134                 AMD_IP_BLOCK_TYPE_VCN
3135         };
3136
3137         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3138                 int j;
3139                 struct amdgpu_ip_block *block;
3140
3141                 for (j = 0; j < adev->num_ip_blocks; j++) {
3142                         block = &adev->ip_blocks[j];
3143
3144                         if (block->version->type != ip_order[i] ||
3145                                 !block->status.valid ||
3146                                 block->status.hw)
3147                                 continue;
3148
3149                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3150                                 r = block->version->funcs->resume(adev);
3151                         else
3152                                 r = block->version->funcs->hw_init(adev);
3153
3154                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3155                         if (r)
3156                                 return r;
3157                         block->status.hw = true;
3158                 }
3159         }
3160
3161         return 0;
3162 }
3163
3164 /**
3165  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3166  *
3167  * @adev: amdgpu_device pointer
3168  *
3169  * First resume function for hardware IPs.  The list of all the hardware
3170  * IPs that make up the asic is walked and the resume callbacks are run for
3171  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3172  * after a suspend and updates the software state as necessary.  This
3173  * function is also used for restoring the GPU after a GPU reset.
3174  * Returns 0 on success, negative error code on failure.
3175  */
3176 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3177 {
3178         int i, r;
3179
3180         for (i = 0; i < adev->num_ip_blocks; i++) {
3181                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3182                         continue;
3183                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3184                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3185                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3186                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3187
3188                         r = adev->ip_blocks[i].version->funcs->resume(adev);
3189                         if (r) {
3190                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
3191                                           adev->ip_blocks[i].version->funcs->name, r);
3192                                 return r;
3193                         }
3194                         adev->ip_blocks[i].status.hw = true;
3195                 }
3196         }
3197
3198         return 0;
3199 }
3200
3201 /**
3202  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3203  *
3204  * @adev: amdgpu_device pointer
3205  *
3206  * Second resume function for hardware IPs.  The list of all the hardware
3207  * IPs that make up the asic is walked and the resume callbacks are run for
3208  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3209  * functional state after a suspend and updates the software state as
3210  * necessary.  This function is also used for restoring the GPU after a GPU
3211  * reset.
3212  * Returns 0 on success, negative error code on failure.
3213  */
3214 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3215 {
3216         int i, r;
3217
3218         for (i = 0; i < adev->num_ip_blocks; i++) {
3219                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3220                         continue;
3221                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3222                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3223                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3224                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3225                         continue;
3226                 r = adev->ip_blocks[i].version->funcs->resume(adev);
3227                 if (r) {
3228                         DRM_ERROR("resume of IP block <%s> failed %d\n",
3229                                   adev->ip_blocks[i].version->funcs->name, r);
3230                         return r;
3231                 }
3232                 adev->ip_blocks[i].status.hw = true;
3233
3234                 if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3235                         /* disable gfxoff for IP resume. The gfxoff will be re-enabled in
3236                          * amdgpu_device_resume() after IP resume.
3237                          */
3238                         amdgpu_gfx_off_ctrl(adev, false);
3239                         DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n");
3240                 }
3241
3242         }
3243
3244         return 0;
3245 }
3246
3247 /**
3248  * amdgpu_device_ip_resume - run resume for hardware IPs
3249  *
3250  * @adev: amdgpu_device pointer
3251  *
3252  * Main resume function for hardware IPs.  The hardware IPs
3253  * are split into two resume functions because they are
3254  * also used in recovering from a GPU reset and some additional
3255  * steps need to be taken between them.  In this case (S3/S4) they are
3256  * run sequentially.
3257  * Returns 0 on success, negative error code on failure.
3258  */
3259 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3260 {
3261         int r;
3262
3263         r = amdgpu_amdkfd_resume_iommu(adev);
3264         if (r)
3265                 return r;
3266
3267         r = amdgpu_device_ip_resume_phase1(adev);
3268         if (r)
3269                 return r;
3270
3271         r = amdgpu_device_fw_loading(adev);
3272         if (r)
3273                 return r;
3274
3275         r = amdgpu_device_ip_resume_phase2(adev);
3276
3277         return r;
3278 }
3279
3280 /**
3281  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3282  *
3283  * @adev: amdgpu_device pointer
3284  *
3285  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3286  */
3287 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3288 {
3289         if (amdgpu_sriov_vf(adev)) {
3290                 if (adev->is_atom_fw) {
3291                         if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3292                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3293                 } else {
3294                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3295                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3296                 }
3297
3298                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3299                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3300         }
3301 }
3302
3303 /**
3304  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3305  *
3306  * @asic_type: AMD asic type
3307  *
3308  * Check if there is DC (new modesetting infrastructure) support for an asic.
3309  * Returns true if DC has support, false if not.
3310  */
3311 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3312 {
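        /*
         * Note on the checks below: amdgpu_dc is the "dc" module parameter
         * (nominally -1, i.e. auto, by default), so "amdgpu_dc > 0" means DC
         * was explicitly requested, while "amdgpu_dc != 0" means it was not
         * explicitly disabled.
         */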
3313         switch (asic_type) {
3314 #ifdef CONFIG_DRM_AMDGPU_SI
3315         case CHIP_HAINAN:
3316 #endif
3317         case CHIP_TOPAZ:
3318                 /* chips with no display hardware */
3319                 return false;
3320 #if defined(CONFIG_DRM_AMD_DC)
3321         case CHIP_TAHITI:
3322         case CHIP_PITCAIRN:
3323         case CHIP_VERDE:
3324         case CHIP_OLAND:
3325                 /*
3326                  * We have systems in the wild with these ASICs that require
3327                  * LVDS and VGA support which is not supported with DC.
3328                  *
3329                  * Fall back to the non-DC driver here by default so as not to
3330                  * cause regressions.
3331                  */
3332 #if defined(CONFIG_DRM_AMD_DC_SI)
3333                 return amdgpu_dc > 0;
3334 #else
3335                 return false;
3336 #endif
3337         case CHIP_BONAIRE:
3338         case CHIP_KAVERI:
3339         case CHIP_KABINI:
3340         case CHIP_MULLINS:
3341                 /*
3342                  * We have systems in the wild with these ASICs that require
3343                  * VGA support which is not supported with DC.
3344                  *
3345                  * Fall back to the non-DC driver here by default so as not to
3346                  * cause regressions.
3347                  */
3348                 return amdgpu_dc > 0;
3349         default:
3350                 return amdgpu_dc != 0;
3351 #else
3352         default:
3353                 if (amdgpu_dc > 0)
3354                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3355                                          "but isn't supported by ASIC, ignoring\n");
3356                 return false;
3357 #endif
3358         }
3359 }
3360
3361 /**
3362  * amdgpu_device_has_dc_support - check if dc is supported
3363  *
3364  * @adev: amdgpu_device pointer
3365  *
3366  * Returns true for supported, false for not supported
3367  */
3368 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3369 {
3370         if (adev->enable_virtual_display ||
3371             (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3372                 return false;
3373
3374         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3375 }
3376
3377 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3378 {
3379         struct amdgpu_device *adev =
3380                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3381         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3382
3383         /* It's a bug to not have a hive within this function */
3384         if (WARN_ON(!hive))
3385                 return;
3386
3387         /*
3388          * Use task barrier to synchronize all xgmi reset works across the
3389          * hive. task_barrier_enter and task_barrier_exit will block
3390          * until all the threads running the xgmi reset works reach
3391          * those points. task_barrier_full will do both blocks.
3392          */
3393         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3394
3395                 task_barrier_enter(&hive->tb);
3396                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3397
3398                 if (adev->asic_reset_res)
3399                         goto fail;
3400
3401                 task_barrier_exit(&hive->tb);
3402                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3403
3404                 if (adev->asic_reset_res)
3405                         goto fail;
3406
3407                 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3408                     adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3409                         adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3410         } else {
3411
3412                 task_barrier_full(&hive->tb);
3413                 adev->asic_reset_res = amdgpu_asic_reset(adev);
3414         }
3415
3416 fail:
3417         if (adev->asic_reset_res)
3418                 DRM_WARN("ASIC reset failed with error %d for drm dev %s",
3419                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3420         amdgpu_put_xgmi_hive(hive);
3421 }
3422
3423 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3424 {
3425         char *input = amdgpu_lockup_timeout;
3426         char *timeout_setting = NULL;
3427         int index = 0;
3428         long timeout;
3429         int ret = 0;
3430
3431         /*
3432          * By default the timeout for non-compute jobs is 10000
3433          * and 60000 for compute jobs.
3434          * In SR-IOV or passthrough mode, the timeout for compute
3435          * jobs is 60000 by default.
3436          */
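        /*
         * Illustrative example (hypothetical values): booting with
         * amdgpu.lockup_timeout=10000,60000,10000,10000 would set the gfx,
         * compute, sdma and video timeouts (in ms) in that order; a single
         * value applies to all non-compute queues (and to compute as well
         * under SR-IOV or passthrough).
         */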
3437         adev->gfx_timeout = msecs_to_jiffies(10000);
3438         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3439         if (amdgpu_sriov_vf(adev))
3440                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3441                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3442         else
3443                 adev->compute_timeout = msecs_to_jiffies(60000);
3444
3445         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3446                 while ((timeout_setting = strsep(&input, ",")) &&
3447                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3448                         ret = kstrtol(timeout_setting, 0, &timeout);
3449                         if (ret)
3450                                 return ret;
3451
3452                         if (timeout == 0) {
3453                                 index++;
3454                                 continue;
3455                         } else if (timeout < 0) {
3456                                 timeout = MAX_SCHEDULE_TIMEOUT;
3457                                 dev_warn(adev->dev, "lockup timeout disabled");
3458                                 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3459                         } else {
3460                                 timeout = msecs_to_jiffies(timeout);
3461                         }
3462
3463                         switch (index++) {
3464                         case 0:
3465                                 adev->gfx_timeout = timeout;
3466                                 break;
3467                         case 1:
3468                                 adev->compute_timeout = timeout;
3469                                 break;
3470                         case 2:
3471                                 adev->sdma_timeout = timeout;
3472                                 break;
3473                         case 3:
3474                                 adev->video_timeout = timeout;
3475                                 break;
3476                         default:
3477                                 break;
3478                         }
3479                 }
3480                 /*
3481                  * There is only one value specified and
3482                  * it should apply to all non-compute jobs.
3483                  */
3484                 if (index == 1) {
3485                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3486                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3487                                 adev->compute_timeout = adev->gfx_timeout;
3488                 }
3489         }
3490
3491         return ret;
3492 }
3493
3494 /**
3495  * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3496  *
3497  * @adev: amdgpu_device pointer
3498  *
3499  * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3500  */
3501 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3502 {
3503         struct iommu_domain *domain;
3504
3505         domain = iommu_get_domain_for_dev(adev->dev);
3506         if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3507                 adev->ram_is_direct_mapped = true;
3508 }
3509
3510 static const struct attribute *amdgpu_dev_attributes[] = {
3511         &dev_attr_product_name.attr,
3512         &dev_attr_product_number.attr,
3513         &dev_attr_serial_number.attr,
3514         &dev_attr_pcie_replay_count.attr,
3515         NULL
3516 };
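/*
 * These attributes live on the underlying PCI device; for illustration they
 * typically appear under a path such as /sys/class/drm/cardX/device/product_name.
 */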
3517
3518 /**
3519  * amdgpu_device_init - initialize the driver
3520  *
3521  * @adev: amdgpu_device pointer
3522  * @flags: driver flags
3523  *
3524  * Initializes the driver info and hw (all asics).
3525  * Returns 0 for success or an error on failure.
3526  * Called at driver startup.
3527  */
3528 int amdgpu_device_init(struct amdgpu_device *adev,
3529                        uint32_t flags)
3530 {
3531         struct drm_device *ddev = adev_to_drm(adev);
3532         struct pci_dev *pdev = adev->pdev;
3533         int r, i;
3534         bool px = false;
3535         u32 max_MBps;
3536
3537         adev->shutdown = false;
3538         adev->flags = flags;
3539
3540         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3541                 adev->asic_type = amdgpu_force_asic_type;
3542         else
3543                 adev->asic_type = flags & AMD_ASIC_MASK;
3544
3545         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3546         if (amdgpu_emu_mode == 1)
3547                 adev->usec_timeout *= 10;
3548         adev->gmc.gart_size = 512 * 1024 * 1024;
3549         adev->accel_working = false;
3550         adev->num_rings = 0;
3551         RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3552         adev->mman.buffer_funcs = NULL;
3553         adev->mman.buffer_funcs_ring = NULL;
3554         adev->vm_manager.vm_pte_funcs = NULL;
3555         adev->vm_manager.vm_pte_num_scheds = 0;
3556         adev->gmc.gmc_funcs = NULL;
3557         adev->harvest_ip_mask = 0x0;
3558         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3559         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3560
3561         adev->smc_rreg = &amdgpu_invalid_rreg;
3562         adev->smc_wreg = &amdgpu_invalid_wreg;
3563         adev->pcie_rreg = &amdgpu_invalid_rreg;
3564         adev->pcie_wreg = &amdgpu_invalid_wreg;
3565         adev->pciep_rreg = &amdgpu_invalid_rreg;
3566         adev->pciep_wreg = &amdgpu_invalid_wreg;
3567         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3568         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3569         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3570         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3571         adev->didt_rreg = &amdgpu_invalid_rreg;
3572         adev->didt_wreg = &amdgpu_invalid_wreg;
3573         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3574         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3575         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3576         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3577
3578         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3579                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3580                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3581
3582         /* mutex initialization is all done here so we
3583          * can re-call these functions without locking issues */
3584         mutex_init(&adev->firmware.mutex);
3585         mutex_init(&adev->pm.mutex);
3586         mutex_init(&adev->gfx.gpu_clock_mutex);
3587         mutex_init(&adev->srbm_mutex);
3588         mutex_init(&adev->gfx.pipe_reserve_mutex);
3589         mutex_init(&adev->gfx.gfx_off_mutex);
3590         mutex_init(&adev->grbm_idx_mutex);
3591         mutex_init(&adev->mn_lock);
3592         mutex_init(&adev->virt.vf_errors.lock);
3593         hash_init(adev->mn_hash);
3594         mutex_init(&adev->psp.mutex);
3595         mutex_init(&adev->notifier_lock);
3596         mutex_init(&adev->pm.stable_pstate_ctx_lock);
3597         mutex_init(&adev->benchmark_mutex);
3598
3599         amdgpu_device_init_apu_flags(adev);
3600
3601         r = amdgpu_device_check_arguments(adev);
3602         if (r)
3603                 return r;
3604
3605         spin_lock_init(&adev->mmio_idx_lock);
3606         spin_lock_init(&adev->smc_idx_lock);
3607         spin_lock_init(&adev->pcie_idx_lock);
3608         spin_lock_init(&adev->uvd_ctx_idx_lock);
3609         spin_lock_init(&adev->didt_idx_lock);
3610         spin_lock_init(&adev->gc_cac_idx_lock);
3611         spin_lock_init(&adev->se_cac_idx_lock);
3612         spin_lock_init(&adev->audio_endpt_idx_lock);
3613         spin_lock_init(&adev->mm_stats.lock);
3614
3615         INIT_LIST_HEAD(&adev->shadow_list);
3616         mutex_init(&adev->shadow_list_lock);
3617
3618         INIT_LIST_HEAD(&adev->reset_list);
3619
3620         INIT_LIST_HEAD(&adev->ras_list);
3621
3622         INIT_DELAYED_WORK(&adev->delayed_init_work,
3623                           amdgpu_device_delayed_init_work_handler);
3624         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3625                           amdgpu_device_delay_enable_gfx_off);
3626
3627         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3628
3629         adev->gfx.gfx_off_req_count = 1;
3630         adev->gfx.gfx_off_residency = 0;
3631         adev->gfx.gfx_off_entrycount = 0;
3632         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3633
3634         atomic_set(&adev->throttling_logging_enabled, 1);
3635         /*
3636          * If throttling continues, logging will be performed every minute
3637          * to avoid log flooding. "-1" is subtracted since the thermal
3638          * throttling interrupt comes every second. Thus, the total logging
3639          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3640          * for throttling interrupt) = 60 seconds.
3641          */
3642         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3643         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3644
3645         /* Registers mapping */
3646         /* TODO: block userspace mapping of io register */
3647         if (adev->asic_type >= CHIP_BONAIRE) {
3648                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3649                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3650         } else {
3651                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3652                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3653         }
3654
3655         for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3656                 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3657
3658         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3659         if (adev->rmmio == NULL) {
3660                 return -ENOMEM;
3661         }
3662         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3663         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3664
3665         amdgpu_device_get_pcie_info(adev);
3666
3667         if (amdgpu_mcbp)
3668                 DRM_INFO("MCBP is enabled\n");
3669
3670         /*
3671          * The reset domain needs to be present early, before the XGMI hive is
3672          * discovered (if any) and initialized, to use the reset sem and in_gpu
3673          * reset flag early on during init and before calling RREG32.
3674          */
3675         adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3676         if (!adev->reset_domain)
3677                 return -ENOMEM;
3678
3679         /* detect hw virtualization here */
3680         amdgpu_detect_virtualization(adev);
3681
3682         r = amdgpu_device_get_job_timeout_settings(adev);
3683         if (r) {
3684                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3685                 return r;
3686         }
3687
3688         /* early init functions */
3689         r = amdgpu_device_ip_early_init(adev);
3690         if (r)
3691                 return r;
3692
3693         /* Get rid of things like offb */
3694         r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
3695         if (r)
3696                 return r;
3697
3698         /* Enable TMZ based on IP_VERSION */
3699         amdgpu_gmc_tmz_set(adev);
3700
3701         amdgpu_gmc_noretry_set(adev);
3702         /* Need to get xgmi info early to decide the reset behavior */
3703         if (adev->gmc.xgmi.supported) {
3704                 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3705                 if (r)
3706                         return r;
3707         }
3708
3709         /* enable PCIE atomic ops */
3710         if (amdgpu_sriov_vf(adev))
3711                 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3712                         adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3713                         (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3714         else
3715                 adev->have_atomics_support =
3716                         !pci_enable_atomic_ops_to_root(adev->pdev,
3717                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3718                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3719         if (!adev->have_atomics_support)
3720                 dev_info(adev->dev, "PCIE atomic ops are not supported\n");
3721
3722         /* doorbell bar mapping and doorbell index init */
3723         amdgpu_device_doorbell_init(adev);
3724
3725         if (amdgpu_emu_mode == 1) {
3726                 /* post the asic on emulation mode */
3727                 emu_soc_asic_init(adev);
3728                 goto fence_driver_init;
3729         }
3730
3731         amdgpu_reset_init(adev);
3732
3733         /* detect if we have an SR-IOV vbios */
3734         amdgpu_device_detect_sriov_bios(adev);
3735
3736         /* check if we need to reset the asic
3737          *  E.g., driver was not cleanly unloaded previously, etc.
3738          */
3739         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3740                 if (adev->gmc.xgmi.num_physical_nodes) {
3741                         dev_info(adev->dev, "Pending hive reset.\n");
3742                         adev->gmc.xgmi.pending_reset = true;
3743                         /* Only need to init the necessary blocks for SMU to handle the reset */
3744                         for (i = 0; i < adev->num_ip_blocks; i++) {
3745                                 if (!adev->ip_blocks[i].status.valid)
3746                                         continue;
3747                                 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3748                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3749                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3750                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3751                                         DRM_DEBUG("IP %s disabled for hw_init.\n",
3752                                                 adev->ip_blocks[i].version->funcs->name);
3753                                         adev->ip_blocks[i].status.hw = true;
3754                                 }
3755                         }
3756                 } else {
3757                         r = amdgpu_asic_reset(adev);
3758                         if (r) {
3759                                 dev_err(adev->dev, "asic reset on init failed\n");
3760                                 goto failed;
3761                         }
3762                 }
3763         }
3764
3765         pci_enable_pcie_error_reporting(adev->pdev);
3766
3767         /* Post card if necessary */
3768         if (amdgpu_device_need_post(adev)) {
3769                 if (!adev->bios) {
3770                         dev_err(adev->dev, "no vBIOS found\n");
3771                         r = -EINVAL;
3772                         goto failed;
3773                 }
3774                 DRM_INFO("GPU posting now...\n");
3775                 r = amdgpu_device_asic_init(adev);
3776                 if (r) {
3777                         dev_err(adev->dev, "gpu post error!\n");
3778                         goto failed;
3779                 }
3780         }
3781
3782         if (adev->is_atom_fw) {
3783                 /* Initialize clocks */
3784                 r = amdgpu_atomfirmware_get_clock_info(adev);
3785                 if (r) {
3786                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3787                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3788                         goto failed;
3789                 }
3790         } else {
3791                 /* Initialize clocks */
3792                 r = amdgpu_atombios_get_clock_info(adev);
3793                 if (r) {
3794                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3795                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3796                         goto failed;
3797                 }
3798                 /* init i2c buses */
3799                 if (!amdgpu_device_has_dc_support(adev))
3800                         amdgpu_atombios_i2c_init(adev);
3801         }
3802
3803 fence_driver_init:
3804         /* Fence driver */
3805         r = amdgpu_fence_driver_sw_init(adev);
3806         if (r) {
3807                 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3808                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3809                 goto failed;
3810         }
3811
3812         /* init the mode config */
3813         drm_mode_config_init(adev_to_drm(adev));
3814
3815         r = amdgpu_device_ip_init(adev);
3816         if (r) {
3817                 /* failed in exclusive mode due to timeout */
3818                 if (amdgpu_sriov_vf(adev) &&
3819                     !amdgpu_sriov_runtime(adev) &&
3820                     amdgpu_virt_mmio_blocked(adev) &&
3821                     !amdgpu_virt_wait_reset(adev)) {
3822                         dev_err(adev->dev, "VF exclusive mode timeout\n");
3823                         /* Don't send request since VF is inactive. */
3824                         adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3825                         adev->virt.ops = NULL;
3826                         r = -EAGAIN;
3827                         goto release_ras_con;
3828                 }
3829                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3830                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3831                 goto release_ras_con;
3832         }
3833
3834         amdgpu_fence_driver_hw_init(adev);
3835
3836         dev_info(adev->dev,
3837                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3838                         adev->gfx.config.max_shader_engines,
3839                         adev->gfx.config.max_sh_per_se,
3840                         adev->gfx.config.max_cu_per_sh,
3841                         adev->gfx.cu_info.number);
3842
3843         adev->accel_working = true;
3844
3845         amdgpu_vm_check_compute_bug(adev);
3846
3847         /* Initialize the buffer migration limit. */
3848         if (amdgpu_moverate >= 0)
3849                 max_MBps = amdgpu_moverate;
3850         else
3851                 max_MBps = 8; /* Allow 8 MB/s. */
3852         /* Get a log2 for easy divisions. */
3853         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
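        /*
         * Keeping the limit as a log2 lets callers, such as the buffer move
         * throttling in the command submission path, convert between bytes
         * and time with shifts instead of divisions.
         */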
3854
3855         r = amdgpu_pm_sysfs_init(adev);
3856         if (r) {
3857                 adev->pm_sysfs_en = false;
3858                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3859         } else
3860                 adev->pm_sysfs_en = true;
3861
3862         r = amdgpu_ucode_sysfs_init(adev);
3863         if (r) {
3864                 adev->ucode_sysfs_en = false;
3865                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3866         } else
3867                 adev->ucode_sysfs_en = true;
3868
3869         r = amdgpu_psp_sysfs_init(adev);
3870         if (r) {
3871                 adev->psp_sysfs_en = false;
3872                 if (!amdgpu_sriov_vf(adev))
3873                         DRM_ERROR("Creating psp sysfs failed\n");
3874         } else
3875                 adev->psp_sysfs_en = true;
3876
3877         /*
3878          * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3879          * Otherwise the mgpu fan boost feature will be skipped because the
3880          * gpu instance count would be too low.
3881          */
3882         amdgpu_register_gpu_instance(adev);
3883
3884         /* enable clockgating, etc. after ib tests, since some blocks require
3885          * explicit gating rather than handling it automatically.
3886          */
3887         if (!adev->gmc.xgmi.pending_reset) {
3888                 r = amdgpu_device_ip_late_init(adev);
3889                 if (r) {
3890                         dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3891                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3892                         goto release_ras_con;
3893                 }
3894                 /* must succeed. */
3895                 amdgpu_ras_resume(adev);
3896                 queue_delayed_work(system_wq, &adev->delayed_init_work,
3897                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3898         }
3899
3900         if (amdgpu_sriov_vf(adev))
3901                 flush_delayed_work(&adev->delayed_init_work);
3902
3903         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3904         if (r)
3905                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3906
3907         if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3908                 r = amdgpu_pmu_init(adev);
3909                 if (r)
3910                         dev_err(adev->dev, "amdgpu_pmu_init failed\n");
             }
3911
3912         /* Keep stored PCI config space at hand for restore on sudden PCI error */
3913         if (amdgpu_device_cache_pci_state(adev->pdev))
3914                 pci_restore_state(pdev);
3915
3916         /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3917         /* this will fail for cards that aren't VGA class devices, just
3918          * ignore it */
3919         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3920                 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3921
3922         if (amdgpu_device_supports_px(ddev)) {
3923                 px = true;
3924                 vga_switcheroo_register_client(adev->pdev,
3925                                                &amdgpu_switcheroo_ops, px);
3926                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3927         }
3928
3929         if (adev->gmc.xgmi.pending_reset)
3930                 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3931                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3932
3933         amdgpu_device_check_iommu_direct_map(adev);
3934
3935         return 0;
3936
3937 release_ras_con:
3938         amdgpu_release_ras_context(adev);
3939
3940 failed:
3941         amdgpu_vf_error_trans_all(adev);
3942
3943         return r;
3944 }
3945
3946 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3947 {
3948
3949         /* Clear all CPU mappings pointing to this device */
3950         unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3951
3952         /* Unmap all mapped bars - Doorbell, registers and VRAM */
3953         amdgpu_device_doorbell_fini(adev);
3954
3955         iounmap(adev->rmmio);
3956         adev->rmmio = NULL;
3957         if (adev->mman.aper_base_kaddr)
3958                 iounmap(adev->mman.aper_base_kaddr);
3959         adev->mman.aper_base_kaddr = NULL;
3960
3961         /* Memory manager related */
3962         if (!adev->gmc.xgmi.connected_to_cpu) {
3963                 arch_phys_wc_del(adev->gmc.vram_mtrr);
3964                 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3965         }
3966 }
3967
3968 /**
3969  * amdgpu_device_fini_hw - tear down the driver
3970  *
3971  * @adev: amdgpu_device pointer
3972  *
3973  * Tear down the driver info (all asics).
3974  * Called at driver shutdown.
3975  */
3976 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3977 {
3978         dev_info(adev->dev, "amdgpu: finishing device.\n");
3979         flush_delayed_work(&adev->delayed_init_work);
3980         adev->shutdown = true;
3981
3982         /* make sure IB tests have finished before entering exclusive mode
3983          * to avoid preemption on IB tests
3984          */
3985         if (amdgpu_sriov_vf(adev)) {
3986                 amdgpu_virt_request_full_gpu(adev, false);
3987                 amdgpu_virt_fini_data_exchange(adev);
3988         }
3989
3990         /* disable all interrupts */
3991         amdgpu_irq_disable_all(adev);
3992         if (adev->mode_info.mode_config_initialized) {
3993                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3994                         drm_helper_force_disable_all(adev_to_drm(adev));
3995                 else
3996                         drm_atomic_helper_shutdown(adev_to_drm(adev));
3997         }
3998         amdgpu_fence_driver_hw_fini(adev);
3999
4000         if (adev->mman.initialized) {
4001                 flush_delayed_work(&adev->mman.bdev.wq);
4002                 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
4003         }
4004
4005         if (adev->pm_sysfs_en)
4006                 amdgpu_pm_sysfs_fini(adev);
4007         if (adev->ucode_sysfs_en)
4008                 amdgpu_ucode_sysfs_fini(adev);
4009         if (adev->psp_sysfs_en)
4010                 amdgpu_psp_sysfs_fini(adev);
4011         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4012
4013         /* disabling ras features must happen before hw fini */
4014         amdgpu_ras_pre_fini(adev);
4015
4016         amdgpu_device_ip_fini_early(adev);
4017
4018         amdgpu_irq_fini_hw(adev);
4019
4020         if (adev->mman.initialized)
4021                 ttm_device_clear_dma_mappings(&adev->mman.bdev);
4022
4023         amdgpu_gart_dummy_page_fini(adev);
4024
4025         amdgpu_device_unmap_mmio(adev);
4026
4027 }
4028
4029 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4030 {
4031         int idx;
4032
4033         amdgpu_fence_driver_sw_fini(adev);
4034         amdgpu_device_ip_fini(adev);
4035         release_firmware(adev->firmware.gpu_info_fw);
4036         adev->firmware.gpu_info_fw = NULL;
4037         adev->accel_working = false;
4038         dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4039
4040         amdgpu_reset_fini(adev);
4041
4042         /* free i2c buses */
4043         if (!amdgpu_device_has_dc_support(adev))
4044                 amdgpu_i2c_fini(adev);
4045
4046         if (amdgpu_emu_mode != 1)
4047                 amdgpu_atombios_fini(adev);
4048
4049         kfree(adev->bios);
4050         adev->bios = NULL;
4051         if (amdgpu_device_supports_px(adev_to_drm(adev))) {
4052                 vga_switcheroo_unregister_client(adev->pdev);
4053                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4054         }
4055         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4056                 vga_client_unregister(adev->pdev);
4057
4058         if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4059
4060                 iounmap(adev->rmmio);
4061                 adev->rmmio = NULL;
4062                 amdgpu_device_doorbell_fini(adev);
4063                 drm_dev_exit(idx);
4064         }
4065
4066         if (IS_ENABLED(CONFIG_PERF_EVENTS))
4067                 amdgpu_pmu_fini(adev);
4068         if (adev->mman.discovery_bin)
4069                 amdgpu_discovery_fini(adev);
4070
4071         amdgpu_reset_put_reset_domain(adev->reset_domain);
4072         adev->reset_domain = NULL;
4073
4074         kfree(adev->pci_state);
4075
4076 }
4077
4078 /**
4079  * amdgpu_device_evict_resources - evict device resources
4080  * @adev: amdgpu device object
4081  *
4082  * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4083  * of the vram memory type. Mainly used for evicting device resources
4084  * at suspend time.
4085  *
4086  */
4087 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4088 {
4089         int ret;
4090
4091         /* No need to evict vram on APUs for suspend to ram or s2idle */
4092         if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4093                 return 0;
4094
4095         ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4096         if (ret)
4097                 DRM_WARN("evicting device resources failed\n");
4098         return ret;
4099 }
4100
4101 /*
4102  * Suspend & resume.
4103  */
4104 /**
4105  * amdgpu_device_suspend - initiate device suspend
4106  *
4107  * @dev: drm dev pointer
4108  * @fbcon: notify the fbdev of suspend
4109  *
4110  * Puts the hw in the suspend state (all asics).
4111  * Returns 0 for success or an error on failure.
4112  * Called at driver suspend.
4113  */
4114 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4115 {
4116         struct amdgpu_device *adev = drm_to_adev(dev);
4117         int r = 0;
4118
4119         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4120                 return 0;
4121
4122         adev->in_suspend = true;
4123
4124         /* Evict the majority of BOs before grabbing the full access */
4125         r = amdgpu_device_evict_resources(adev);
4126         if (r)
4127                 return r;
4128
4129         if (amdgpu_sriov_vf(adev)) {
4130                 amdgpu_virt_fini_data_exchange(adev);
4131                 r = amdgpu_virt_request_full_gpu(adev, false);
4132                 if (r)
4133                         return r;
4134         }
4135
4136         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4137                 DRM_WARN("smart shift update failed\n");
4138
4139         drm_kms_helper_poll_disable(dev);
4140
4141         if (fbcon)
4142                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4143
4144         cancel_delayed_work_sync(&adev->delayed_init_work);
4145
4146         amdgpu_ras_suspend(adev);
4147
4148         amdgpu_device_ip_suspend_phase1(adev);
4149
4150         if (!adev->in_s0ix)
4151                 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4152
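        /*
         * Evict again: the earlier pass only covered the majority of BOs;
         * this second pass presumably moves out anything that was still busy
         * or became resident while phase 1 and the KFD suspend ran.
         */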
4153         r = amdgpu_device_evict_resources(adev);
4154         if (r)
4155                 return r;
4156
4157         amdgpu_fence_driver_hw_fini(adev);
4158
4159         amdgpu_device_ip_suspend_phase2(adev);
4160
4161         if (amdgpu_sriov_vf(adev))
4162                 amdgpu_virt_release_full_gpu(adev, false);
4163
4164         return 0;
4165 }
4166
4167 /**
4168  * amdgpu_device_resume - initiate device resume
4169  *
4170  * @dev: drm dev pointer
4171  * @fbcon: notify the fbdev of resume
4172  *
4173  * Bring the hw back to operating state (all asics).
4174  * Returns 0 for success or an error on failure.
4175  * Called at driver resume.
4176  */
4177 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4178 {
4179         struct amdgpu_device *adev = drm_to_adev(dev);
4180         int r = 0;
4181
4182         if (amdgpu_sriov_vf(adev)) {
4183                 r = amdgpu_virt_request_full_gpu(adev, true);
4184                 if (r)
4185                         return r;
4186         }
4187
4188         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4189                 return 0;
4190
4191         if (adev->in_s0ix)
4192                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4193
4194         /* post card */
4195         if (amdgpu_device_need_post(adev)) {
4196                 r = amdgpu_device_asic_init(adev);
4197                 if (r)
4198                         dev_err(adev->dev, "amdgpu asic init failed\n");
4199         }
4200
4201         r = amdgpu_device_ip_resume(adev);
4202
4203         if (r) {
4204                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4205                 goto exit;
4206         }
4207         amdgpu_fence_driver_hw_init(adev);
4208
4209         r = amdgpu_device_ip_late_init(adev);
4210         if (r)
4211                 goto exit;
4212
4213         queue_delayed_work(system_wq, &adev->delayed_init_work,
4214                            msecs_to_jiffies(AMDGPU_RESUME_MS));
4215
4216         if (!adev->in_s0ix) {
4217                 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4218                 if (r)
4219                         goto exit;
4220         }
4221
4222 exit:
4223         if (amdgpu_sriov_vf(adev)) {
4224                 amdgpu_virt_init_data_exchange(adev);
4225                 amdgpu_virt_release_full_gpu(adev, true);
4226         }
4227
4228         if (r)
4229                 return r;
4230
4231         /* Make sure IB tests flushed */
4232         flush_delayed_work(&adev->delayed_init_work);
4233
4234         if (adev->in_s0ix) {
4235                 /* re-enable gfxoff after IP resume. It was disabled for IP
4236                  * resume in amdgpu_device_ip_resume_phase2().
4237                  */
4238                 amdgpu_gfx_off_ctrl(adev, true);
4239                 DRM_DEBUG("will enable gfxoff for the mission mode\n");
4240         }
4241         if (fbcon)
4242                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4243
4244         drm_kms_helper_poll_enable(dev);
4245
4246         amdgpu_ras_resume(adev);
4247
4248         if (adev->mode_info.num_crtc) {
4249                 /*
4250                  * Most of the connector probing functions try to acquire runtime pm
4251                  * refs to ensure that the GPU is powered on when connector polling is
4252                  * performed. Since we're calling this from a runtime PM callback,
4253                  * trying to acquire rpm refs will cause us to deadlock.
4254                  *
4255                  * Since we're guaranteed to be holding the rpm lock, it's safe to
4256                  * temporarily disable the rpm helpers so this doesn't deadlock us.
4257                  */
4258 #ifdef CONFIG_PM
4259                 dev->dev->power.disable_depth++;
4260 #endif
4261                 if (!adev->dc_enabled)
4262                         drm_helper_hpd_irq_event(dev);
4263                 else
4264                         drm_kms_helper_hotplug_event(dev);
4265 #ifdef CONFIG_PM
4266                 dev->dev->power.disable_depth--;
4267 #endif
4268         }
4269         adev->in_suspend = false;
4270
4271         if (adev->enable_mes)
4272                 amdgpu_mes_self_test(adev);
4273
4274         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4275                 DRM_WARN("smart shift update failed\n");
4276
4277         return 0;
4278 }
4279
4280 /**
4281  * amdgpu_device_ip_check_soft_reset - check if any hardware IPs are hung
4282  *
4283  * @adev: amdgpu_device pointer
4284  *
4285  * The list of all the hardware IPs that make up the asic is walked and
4286  * the check_soft_reset callbacks are run.  check_soft_reset determines
4287  * if the asic is still hung or not.
4288  * Returns true if any of the IPs are still in a hung state, false if not.
4289  */
4290 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4291 {
4292         int i;
4293         bool asic_hang = false;
4294
4295         if (amdgpu_sriov_vf(adev))
4296                 return true;
4297
4298         if (amdgpu_asic_need_full_reset(adev))
4299                 return true;
4300
4301         for (i = 0; i < adev->num_ip_blocks; i++) {
4302                 if (!adev->ip_blocks[i].status.valid)
4303                         continue;
4304                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4305                         adev->ip_blocks[i].status.hang =
4306                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4307                 if (adev->ip_blocks[i].status.hang) {
4308                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4309                         asic_hang = true;
4310                 }
4311         }
4312         return asic_hang;
4313 }
4314
4315 /**
4316  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4317  *
4318  * @adev: amdgpu_device pointer
4319  *
4320  * The list of all the hardware IPs that make up the asic is walked and the
4321  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4322  * handles any IP specific hardware or software state changes that are
4323  * necessary for a soft reset to succeed.
4324  * Returns 0 on success, negative error code on failure.
4325  */
4326 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4327 {
4328         int i, r = 0;
4329
4330         for (i = 0; i < adev->num_ip_blocks; i++) {
4331                 if (!adev->ip_blocks[i].status.valid)
4332                         continue;
4333                 if (adev->ip_blocks[i].status.hang &&
4334                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4335                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4336                         if (r)
4337                                 return r;
4338                 }
4339         }
4340
4341         return 0;
4342 }
4343
4344 /**
4345  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4346  *
4347  * @adev: amdgpu_device pointer
4348  *
4349  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4350  * reset is necessary to recover.
4351  * Returns true if a full asic reset is required, false if not.
4352  */
4353 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4354 {
4355         int i;
4356
4357         if (amdgpu_asic_need_full_reset(adev))
4358                 return true;
4359
4360         for (i = 0; i < adev->num_ip_blocks; i++) {
4361                 if (!adev->ip_blocks[i].status.valid)
4362                         continue;
4363                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4364                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4365                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4366                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4367                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4368                         if (adev->ip_blocks[i].status.hang) {
4369                                 dev_info(adev->dev, "Some block needs a full reset!\n");
4370                                 return true;
4371                         }
4372                 }
4373         }
4374         return false;
4375 }
4376
4377 /**
4378  * amdgpu_device_ip_soft_reset - do a soft reset
4379  *
4380  * @adev: amdgpu_device pointer
4381  *
4382  * The list of all the hardware IPs that make up the asic is walked and the
4383  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4384  * IP specific hardware or software state changes that are necessary to soft
4385  * reset the IP.
4386  * Returns 0 on success, negative error code on failure.
4387  */
4388 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4389 {
4390         int i, r = 0;
4391
4392         for (i = 0; i < adev->num_ip_blocks; i++) {
4393                 if (!adev->ip_blocks[i].status.valid)
4394                         continue;
4395                 if (adev->ip_blocks[i].status.hang &&
4396                     adev->ip_blocks[i].version->funcs->soft_reset) {
4397                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4398                         if (r)
4399                                 return r;
4400                 }
4401         }
4402
4403         return 0;
4404 }
4405
4406 /**
4407  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4408  *
4409  * @adev: amdgpu_device pointer
4410  *
4411  * The list of all the hardware IPs that make up the asic is walked and the
4412  * post_soft_reset callbacks are run if the block is hung.  post_soft_reset
4413  * handles any IP specific hardware or software state changes that are
4414  * necessary after the IP has been soft reset.
4415  * Returns 0 on success, negative error code on failure.
4416  */
4417 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4418 {
4419         int i, r = 0;
4420
4421         for (i = 0; i < adev->num_ip_blocks; i++) {
4422                 if (!adev->ip_blocks[i].status.valid)
4423                         continue;
4424                 if (adev->ip_blocks[i].status.hang &&
4425                     adev->ip_blocks[i].version->funcs->post_soft_reset)
4426                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4427                 if (r)
4428                         return r;
4429         }
4430
4431         return 0;
4432 }
4433
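/*
 * Illustrative sketch only, not part of the driver: the four per-IP helpers
 * above (check/pre/soft/post) are run as a sequence by the reset path further
 * below, see amdgpu_device_pre_asic_reset().  The function name and the exact
 * error handling here are hypothetical simplifications of that flow.
 */
static int __maybe_unused example_ip_soft_reset_sequence(struct amdgpu_device *adev)
{
        int r;

        /* nothing reported hung - a soft reset would not help */
        if (!amdgpu_device_ip_check_soft_reset(adev))
                return 0;

        /* quiesce the hung IP blocks, reset them, then restore their state */
        r = amdgpu_device_ip_pre_soft_reset(adev);
        if (r)
                return r;
        r = amdgpu_device_ip_soft_reset(adev);
        if (r)
                return r;
        r = amdgpu_device_ip_post_soft_reset(adev);
        if (r)
                return r;

        /* still hung - the caller is expected to fall back to a full reset */
        if (amdgpu_device_ip_check_soft_reset(adev))
                return -EAGAIN;

        return 0;
}
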
4434 /**
4435  * amdgpu_device_recover_vram - Recover some VRAM contents
4436  *
4437  * @adev: amdgpu_device pointer
4438  *
4439  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4440  * restore things like GPUVM page tables after a GPU reset where
4441  * the contents of VRAM might be lost.
4442  *
4443  * Returns:
4444  * 0 on success, negative error code on failure.
4445  */
4446 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4447 {
4448         struct dma_fence *fence = NULL, *next = NULL;
4449         struct amdgpu_bo *shadow;
4450         struct amdgpu_bo_vm *vmbo;
4451         long r = 1, tmo;
4452
4453         if (amdgpu_sriov_runtime(adev))
4454                 tmo = msecs_to_jiffies(8000);
4455         else
4456                 tmo = msecs_to_jiffies(100);
4457
4458         dev_info(adev->dev, "recover vram bo from shadow start\n");
4459         mutex_lock(&adev->shadow_list_lock);
4460         list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4461                 shadow = &vmbo->bo;
4462                 /* No need to recover an evicted BO */
4463                 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4464                     shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4465                     shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4466                         continue;
4467
4468                 r = amdgpu_bo_restore_shadow(shadow, &next);
4469                 if (r)
4470                         break;
4471
4472                 if (fence) {
4473                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4474                         dma_fence_put(fence);
4475                         fence = next;
4476                         if (tmo == 0) {
4477                                 r = -ETIMEDOUT;
4478                                 break;
4479                         } else if (tmo < 0) {
4480                                 r = tmo;
4481                                 break;
4482                         }
4483                 } else {
4484                         fence = next;
4485                 }
4486         }
4487         mutex_unlock(&adev->shadow_list_lock);
4488
4489         if (fence)
4490                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4491         dma_fence_put(fence);
4492
4493         if (r < 0 || tmo <= 0) {
4494                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4495                 return -EIO;
4496         }
4497
4498         dev_info(adev->dev, "recover vram bo from shadow done\n");
4499         return 0;
4500 }
4501
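/*
 * Illustrative sketch only, not part of the driver: the loop above pipelines
 * the shadow restores - while copy N is in flight it waits on copy N-1's
 * fence, and only the last fence is waited on after the loop.  A simplified
 * form of that pattern, with hypothetical names and no shadow handling,
 * looks like this:
 */
static long __maybe_unused example_pipelined_fence_wait(struct dma_fence **fences,
                                                        unsigned int count, long tmo)
{
        struct dma_fence *prev = NULL;
        unsigned int i;

        for (i = 0; i < count; i++) {
                if (prev) {
                        /* wait for the previous copy while the next one runs */
                        tmo = dma_fence_wait_timeout(prev, false, tmo);
                        dma_fence_put(prev);
                        if (tmo <= 0)
                                return tmo ? tmo : -ETIMEDOUT;
                }
                prev = fences[i];
        }

        if (prev) {
                tmo = dma_fence_wait_timeout(prev, false, tmo);
                dma_fence_put(prev);
        }
        return tmo;
}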
4502
4503 /**
4504  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4505  *
4506  * @adev: amdgpu_device pointer
4507  * @from_hypervisor: request from hypervisor
4508  *
4509  * Do a VF FLR and reinitialize the ASIC.
4510  * Returns 0 on success, negative error code on failure.
4511  */
4512 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4513                                      bool from_hypervisor)
4514 {
4515         int r;
4516         struct amdgpu_hive_info *hive = NULL;
4517         int retry_limit = 0;
4518
4519 retry:
4520         amdgpu_amdkfd_pre_reset(adev);
4521
4522         if (from_hypervisor)
4523                 r = amdgpu_virt_request_full_gpu(adev, true);
4524         else
4525                 r = amdgpu_virt_reset_gpu(adev);
4526         if (r)
4527                 return r;
4528
4529         /* Resume IP prior to SMC */
4530         r = amdgpu_device_ip_reinit_early_sriov(adev);
4531         if (r)
4532                 goto error;
4533
4534         amdgpu_virt_init_data_exchange(adev);
4535
4536         r = amdgpu_device_fw_loading(adev);
4537         if (r)
4538                 return r;
4539
4540         /* now we are okay to resume SMC/CP/SDMA */
4541         r = amdgpu_device_ip_reinit_late_sriov(adev);
4542         if (r)
4543                 goto error;
4544
4545         hive = amdgpu_get_xgmi_hive(adev);
4546         /* Update PSP FW topology after reset */
4547         if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4548                 r = amdgpu_xgmi_update_topology(hive, adev);
4549
4550         if (hive)
4551                 amdgpu_put_xgmi_hive(hive);
4552
4553         if (!r) {
4554                 amdgpu_irq_gpu_reset_resume_helper(adev);
4555                 r = amdgpu_ib_ring_tests(adev);
4556
4557                 amdgpu_amdkfd_post_reset(adev);
4558         }
4559
4560 error:
4561         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4562                 amdgpu_inc_vram_lost(adev);
4563                 r = amdgpu_device_recover_vram(adev);
4564         }
4565         amdgpu_virt_release_full_gpu(adev, true);
4566
4567         if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4568                 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4569                         retry_limit++;
4570                         goto retry;
4571                 } else
4572                         DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4573         }
4574
4575         return r;
4576 }
4577
4578 /**
4579  * amdgpu_device_has_job_running - check if there is any job in mirror list
4580  * amdgpu_device_has_job_running - check if there is any job in the pending list
4581  * @adev: amdgpu_device pointer
4582  *
4583  * check if there is any job in mirror list
4584  * Check whether any scheduler ring still has a job in its pending list.
4585 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4586 {
4587         int i;
4588         struct drm_sched_job *job;
4589
4590         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4591                 struct amdgpu_ring *ring = adev->rings[i];
4592
4593                 if (!ring || !ring->sched.thread)
4594                         continue;
4595
4596                 spin_lock(&ring->sched.job_list_lock);
4597                 job = list_first_entry_or_null(&ring->sched.pending_list,
4598                                                struct drm_sched_job, list);
4599                 spin_unlock(&ring->sched.job_list_lock);
4600                 if (job)
4601                         return true;
4602         }
4603         return false;
4604 }
4605
4606 /**
4607  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4608  *
4609  * @adev: amdgpu_device pointer
4610  *
4611  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4612  * a hung GPU.
4613  */
4614 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4615 {
4616
4617         if (amdgpu_gpu_recovery == 0)
4618                 goto disabled;
4619
4620         /* Skip soft reset check in fatal error mode */
4621         if (!amdgpu_ras_is_poison_mode_supported(adev))
4622                 return true;
4623
4624         if (!amdgpu_device_ip_check_soft_reset(adev)) {
4625                 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4626                 return false;
4627         }
4628
4629         if (amdgpu_sriov_vf(adev))
4630                 return true;
4631
4632         if (amdgpu_gpu_recovery == -1) {
4633                 switch (adev->asic_type) {
4634 #ifdef CONFIG_DRM_AMDGPU_SI
4635                 case CHIP_VERDE:
4636                 case CHIP_TAHITI:
4637                 case CHIP_PITCAIRN:
4638                 case CHIP_OLAND:
4639                 case CHIP_HAINAN:
4640 #endif
4641 #ifdef CONFIG_DRM_AMDGPU_CIK
4642                 case CHIP_KAVERI:
4643                 case CHIP_KABINI:
4644                 case CHIP_MULLINS:
4645 #endif
4646                 case CHIP_CARRIZO:
4647                 case CHIP_STONEY:
4648                 case CHIP_CYAN_SKILLFISH:
4649                         goto disabled;
4650                 default:
4651                         break;
4652                 }
4653         }
4654
4655         return true;
4656
4657 disabled:
4658         dev_info(adev->dev, "GPU recovery disabled.\n");
4659         return false;
4660 }
4661
4662 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4663 {
4664         u32 i;
4665         int ret = 0;
4666
4667         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4668
4669         dev_info(adev->dev, "GPU mode1 reset\n");
4670
4671         /* disable BM */
4672         pci_clear_master(adev->pdev);
4673
4674         amdgpu_device_cache_pci_state(adev->pdev);
4675
4676         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4677                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4678                 ret = amdgpu_dpm_mode1_reset(adev);
4679         } else {
4680                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4681                 ret = psp_gpu_reset(adev);
4682         }
4683
4684         if (ret)
4685                 dev_err(adev->dev, "GPU mode1 reset failed\n");
4686
4687         amdgpu_device_load_pci_state(adev->pdev);
4688
4689         /* wait for asic to come out of reset */
4690         for (i = 0; i < adev->usec_timeout; i++) {
4691                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4692
4693                 if (memsize != 0xffffffff)
4694                         break;
4695                 udelay(1);
4696         }
4697
4698         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4699         return ret;
4700 }
4701
4702 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4703                                  struct amdgpu_reset_context *reset_context)
4704 {
4705         int i, r = 0;
4706         struct amdgpu_job *job = NULL;
4707         bool need_full_reset =
4708                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4709
4710         if (reset_context->reset_req_dev == adev)
4711                 job = reset_context->job;
4712
4713         if (amdgpu_sriov_vf(adev)) {
4714                 /* stop the data exchange thread */
4715                 amdgpu_virt_fini_data_exchange(adev);
4716         }
4717
4718         amdgpu_fence_driver_isr_toggle(adev, true);
4719
4720         /* block all schedulers and reset given job's ring */
4721         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4722                 struct amdgpu_ring *ring = adev->rings[i];
4723
4724                 if (!ring || !ring->sched.thread)
4725                         continue;
4726
4727                 /* Clear job fences from the fence driver to avoid force_completion
4728                  * leaving NULL and vm flush fences in the fence driver. */
4729                 amdgpu_fence_driver_clear_job_fences(ring);
4730
4731                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4732                 amdgpu_fence_driver_force_completion(ring);
4733         }
4734
4735         amdgpu_fence_driver_isr_toggle(adev, false);
4736
4737         if (job && job->vm)
4738                 drm_sched_increase_karma(&job->base);
4739
4740         r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4741         /* If reset handler not implemented, continue; otherwise return */
4742         if (r == -ENOSYS)
4743                 r = 0;
4744         else
4745                 return r;
4746
4747         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4748         if (!amdgpu_sriov_vf(adev)) {
4749
4750                 if (!need_full_reset)
4751                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4752
4753                 if (!need_full_reset && amdgpu_gpu_recovery) {
4754                         amdgpu_device_ip_pre_soft_reset(adev);
4755                         r = amdgpu_device_ip_soft_reset(adev);
4756                         amdgpu_device_ip_post_soft_reset(adev);
4757                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4758                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4759                                 need_full_reset = true;
4760                         }
4761                 }
4762
4763                 if (need_full_reset)
4764                         r = amdgpu_device_ip_suspend(adev);
4765                 if (need_full_reset)
4766                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4767                 else
4768                         clear_bit(AMDGPU_NEED_FULL_RESET,
4769                                   &reset_context->flags);
4770         }
4771
4772         return r;
4773 }
4774
4775 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4776 {
4777         int i;
4778
4779         lockdep_assert_held(&adev->reset_domain->sem);
4780
4781         for (i = 0; i < adev->num_regs; i++) {
4782                 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4783                 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4784                                              adev->reset_dump_reg_value[i]);
4785         }
4786
4787         return 0;
4788 }
4789
4790 #ifdef CONFIG_DEV_COREDUMP
4791 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4792                 size_t count, void *data, size_t datalen)
4793 {
4794         struct drm_printer p;
4795         struct amdgpu_device *adev = data;
4796         struct drm_print_iterator iter;
4797         int i;
4798
4799         iter.data = buffer;
4800         iter.offset = 0;
4801         iter.start = offset;
4802         iter.remain = count;
4803
4804         p = drm_coredump_printer(&iter);
4805
4806         drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4807         drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4808         drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4809         drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4810         if (adev->reset_task_info.pid)
4811                 drm_printf(&p, "process_name: %s PID: %d\n",
4812                            adev->reset_task_info.process_name,
4813                            adev->reset_task_info.pid);
4814
4815         if (adev->reset_vram_lost)
4816                 drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4817         if (adev->num_regs) {
4818                 drm_printf(&p, "AMDGPU register dumps:\nOffset:     Value:\n");
4819
4820                 for (i = 0; i < adev->num_regs; i++)
4821                         drm_printf(&p, "0x%08x: 0x%08x\n",
4822                                    adev->reset_dump_reg_list[i],
4823                                    adev->reset_dump_reg_value[i]);
4824         }
4825
4826         return count - iter.remain;
4827 }
4828
4829 static void amdgpu_devcoredump_free(void *data)
4830 {
4831 }
4832
4833 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4834 {
4835         struct drm_device *dev = adev_to_drm(adev);
4836
4837         ktime_get_ts64(&adev->reset_time);
4838         dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
4839                       amdgpu_devcoredump_read, amdgpu_devcoredump_free);
4840 }
4841 #endif
4842
4843 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4844                          struct amdgpu_reset_context *reset_context)
4845 {
4846         struct amdgpu_device *tmp_adev = NULL;
4847         bool need_full_reset, skip_hw_reset, vram_lost = false;
4848         int r = 0;
4849         bool gpu_reset_for_dev_remove = false;
4850
4851         /* Try reset handler method first */
4852         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4853                                     reset_list);
4854         amdgpu_reset_reg_dumps(tmp_adev);
4855
4856         reset_context->reset_device_list = device_list_handle;
4857         r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4858         /* If reset handler not implemented, continue; otherwise return */
4859         if (r == -ENOSYS)
4860                 r = 0;
4861         else
4862                 return r;
4863
4864         /* Reset handler not implemented, use the default method */
4865         need_full_reset =
4866                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4867         skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4868
4869         gpu_reset_for_dev_remove =
4870                 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
4871                         test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4872
4873         /*
4874          * ASIC reset has to be done on all XGMI hive nodes ASAP
4875          * to allow proper link negotiation in FW (within 1 sec).
4876          */
4877         if (!skip_hw_reset && need_full_reset) {
4878                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4879                         /* For XGMI run all resets in parallel to speed up the process */
4880                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4881                                 tmp_adev->gmc.xgmi.pending_reset = false;
4882                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4883                                         r = -EALREADY;
4884                         } else
4885                                 r = amdgpu_asic_reset(tmp_adev);
4886
4887                         if (r) {
4888                                 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4889                                          r, adev_to_drm(tmp_adev)->unique);
4890                                 break;
4891                         }
4892                 }
4893
4894                 /* For XGMI wait for all resets to complete before proceed */
4895                 if (!r) {
4896                         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4897                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4898                                         flush_work(&tmp_adev->xgmi_reset_work);
4899                                         r = tmp_adev->asic_reset_res;
4900                                         if (r)
4901                                                 break;
4902                                 }
4903                         }
4904                 }
4905         }
4906
4907         if (!r && amdgpu_ras_intr_triggered()) {
4908                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4909                         if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4910                             tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4911                                 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4912                 }
4913
4914                 amdgpu_ras_intr_cleared();
4915         }
4916
4917         /* Since the mode1 reset affects base ip blocks, the
4918          * phase1 ip blocks need to be resumed. Otherwise there
4919          * will be a BIOS signature error and the psp bootloader
4920          * can't load the kdb the next time the amdgpu driver is loaded.
4921          */
4922         if (gpu_reset_for_dev_remove) {
4923                 list_for_each_entry(tmp_adev, device_list_handle, reset_list)
4924                         amdgpu_device_ip_resume_phase1(tmp_adev);
4925
4926                 goto end;
4927         }
4928
4929         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4930                 if (need_full_reset) {
4931                         /* post card */
4932                         r = amdgpu_device_asic_init(tmp_adev);
4933                         if (r) {
4934                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
4935                         } else {
4936                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4937                                 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4938                                 if (r)
4939                                         goto out;
4940
4941                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4942                                 if (r)
4943                                         goto out;
4944
4945                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4946 #ifdef CONFIG_DEV_COREDUMP
4947                                 tmp_adev->reset_vram_lost = vram_lost;
4948                                 memset(&tmp_adev->reset_task_info, 0,
4949                                                 sizeof(tmp_adev->reset_task_info));
4950                                 if (reset_context->job && reset_context->job->vm)
4951                                         tmp_adev->reset_task_info =
4952                                                 reset_context->job->vm->task_info;
4953                                 amdgpu_reset_capture_coredumpm(tmp_adev);
4954 #endif
4955                                 if (vram_lost) {
4956                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
4957                                         amdgpu_inc_vram_lost(tmp_adev);
4958                                 }
4959
4960                                 r = amdgpu_device_fw_loading(tmp_adev);
4961                                 if (r)
4962                                         return r;
4963
4964                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4965                                 if (r)
4966                                         goto out;
4967
4968                                 if (vram_lost)
4969                                         amdgpu_device_fill_reset_magic(tmp_adev);
4970
4971                                 /*
4972                                  * Add this ASIC back as tracked, since the reset has
4973                                  * already completed successfully.
4974                                  */
4975                                 amdgpu_register_gpu_instance(tmp_adev);
4976
4977                                 if (!reset_context->hive &&
4978                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4979                                         amdgpu_xgmi_add_device(tmp_adev);
4980
4981                                 r = amdgpu_device_ip_late_init(tmp_adev);
4982                                 if (r)
4983                                         goto out;
4984
4985                                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4986
4987                                 /*
4988                                  * The GPU enters a bad state once the number of faulty
4989                                  * pages retired by ECC reaches the threshold, and RAS
4990                                  * recovery is scheduled next. Add a check here to abort
4991                                  * recovery if the bad page threshold has indeed been
4992                                  * exceeded, and remind the user to either retire this
4993                                  * GPU or set a larger bad_page_threshold value so the
4994                                  * issue can be worked around the next time the driver
4995                                  * is probed.
4996                                  */
4997                                 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4998                                         /* must succeed. */
4999                                         amdgpu_ras_resume(tmp_adev);
5000                                 } else {
5001                                         r = -EINVAL;
5002                                         goto out;
5003                                 }
5004
5005                                 /* Update PSP FW topology after reset */
5006                                 if (reset_context->hive &&
5007                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5008                                         r = amdgpu_xgmi_update_topology(
5009                                                 reset_context->hive, tmp_adev);
5010                         }
5011                 }
5012
5013 out:
5014                 if (!r) {
5015                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5016                         r = amdgpu_ib_ring_tests(tmp_adev);
5017                         if (r) {
5018                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5019                                 need_full_reset = true;
5020                                 r = -EAGAIN;
5021                                 goto end;
5022                         }
5023                 }
5024
5025                 if (!r)
5026                         r = amdgpu_device_recover_vram(tmp_adev);
5027                 else
5028                         tmp_adev->asic_reset_res = r;
5029         }
5030
5031 end:
5032         if (need_full_reset)
5033                 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5034         else
5035                 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5036         return r;
5037 }
5038
5039 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5040 {
5041
5042         switch (amdgpu_asic_reset_method(adev)) {
5043         case AMD_RESET_METHOD_MODE1:
5044                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5045                 break;
5046         case AMD_RESET_METHOD_MODE2:
5047                 adev->mp1_state = PP_MP1_STATE_RESET;
5048                 break;
5049         default:
5050                 adev->mp1_state = PP_MP1_STATE_NONE;
5051                 break;
5052         }
5053 }
5054
5055 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5056 {
5057         amdgpu_vf_error_trans_all(adev);
5058         adev->mp1_state = PP_MP1_STATE_NONE;
5059 }
5060
5061 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5062 {
5063         struct pci_dev *p = NULL;
5064
5065         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5066                         adev->pdev->bus->number, 1);
5067         if (p) {
5068                 pm_runtime_enable(&(p->dev));
5069                 pm_runtime_resume(&(p->dev));
5070         }
5071
5072         pci_dev_put(p);
5073 }
5074
5075 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5076 {
5077         enum amd_reset_method reset_method;
5078         struct pci_dev *p = NULL;
5079         u64 expires;
5080
5081         /*
5082          * For now, only BACO and mode1 reset are confirmed to
5083          * suffer from the audio issue if audio is not properly suspended.
5084          */
5085         reset_method = amdgpu_asic_reset_method(adev);
5086         if ((reset_method != AMD_RESET_METHOD_BACO) &&
5087              (reset_method != AMD_RESET_METHOD_MODE1))
5088                 return -EINVAL;
5089
5090         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5091                         adev->pdev->bus->number, 1);
5092         if (!p)
5093                 return -ENODEV;
5094
5095         expires = pm_runtime_autosuspend_expiration(&(p->dev));
5096         if (!expires)
5097                 /*
5098                  * If we cannot get the audio device autosuspend delay,
5099                  * a fixed 4s interval is used. Since 3s is the audio
5100                  * controller's default autosuspend delay, the 4s used
5101                  * here is guaranteed to cover it.
5102                  */
5103                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5104
5105         while (!pm_runtime_status_suspended(&(p->dev))) {
5106                 if (!pm_runtime_suspend(&(p->dev)))
5107                         break;
5108
5109                 if (expires < ktime_get_mono_fast_ns()) {
5110                         dev_warn(adev->dev, "failed to suspend display audio\n");
5111                         pci_dev_put(p);
5112                         /* TODO: abort the subsequent gpu reset? */
5113                         return -ETIMEDOUT;
5114                 }
5115         }
5116
5117         pm_runtime_disable(&(p->dev));
5118
5119         pci_dev_put(p);
5120         return 0;
5121 }
5122
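/*
 * Illustrative sketch only, not part of the driver: the two helpers above are
 * meant to be used as a pair around an ASIC reset, as amdgpu_device_gpu_recover()
 * below does - the HDA function is runtime-suspended before the reset so the
 * audio driver does not see the hardware change underneath it, and resumed
 * again afterwards.  Function name and the elided reset step are hypothetical.
 */
static void __maybe_unused example_audio_quiesced_reset(struct amdgpu_device *adev)
{
        bool audio_suspended = false;

        if (!amdgpu_device_suspend_display_audio(adev))
                audio_suspended = true;

        /* ... the actual ASIC reset would be performed here ... */

        if (audio_suspended)
                amdgpu_device_resume_display_audio(adev);
}
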
5123 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5124 {
5125         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5126
5127 #if defined(CONFIG_DEBUG_FS)
5128         if (!amdgpu_sriov_vf(adev))
5129                 cancel_work(&adev->reset_work);
5130 #endif
5131
5132         if (adev->kfd.dev)
5133                 cancel_work(&adev->kfd.reset_work);
5134
5135         if (amdgpu_sriov_vf(adev))
5136                 cancel_work(&adev->virt.flr_work);
5137
5138         if (con && adev->ras_enabled)
5139                 cancel_work(&con->recovery_work);
5140
5141 }
5142
5143 /**
5144  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5145  *
5146  * @adev: amdgpu_device pointer
5147  * @job: which job triggered the hang
5148  *
5149  * Attempt to reset the GPU if it has hung (all asics).
5150  * Attempt a soft reset or a full reset and reinitialize the ASIC.
5151  * Returns 0 for success or an error on failure.
5152  */
5153
5154 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5155                               struct amdgpu_job *job,
5156                               struct amdgpu_reset_context *reset_context)
5157 {
5158         struct list_head device_list, *device_list_handle =  NULL;
5159         bool job_signaled = false;
5160         struct amdgpu_hive_info *hive = NULL;
5161         struct amdgpu_device *tmp_adev = NULL;
5162         int i, r = 0;
5163         bool need_emergency_restart = false;
5164         bool audio_suspended = false;
5165         bool gpu_reset_for_dev_remove = false;
5166
5167         gpu_reset_for_dev_remove =
5168                         test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5169                                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5170
5171         /*
5172          * Special case: RAS triggered and full reset isn't supported
5173          */
5174         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5175
5176         /*
5177          * Flush RAM to disk so that after reboot
5178          * the user can read the log and see why the system rebooted.
5179          */
5180         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5181                 DRM_WARN("Emergency reboot.");
5182
5183                 ksys_sync_helper();
5184                 emergency_restart();
5185         }
5186
5187         dev_info(adev->dev, "GPU %s begin!\n",
5188                 need_emergency_restart ? "jobs stop" : "reset");
5189
5190         if (!amdgpu_sriov_vf(adev))
5191                 hive = amdgpu_get_xgmi_hive(adev);
5192         if (hive)
5193                 mutex_lock(&hive->hive_lock);
5194
5195         reset_context->job = job;
5196         reset_context->hive = hive;
5197         /*
5198          * Build list of devices to reset.
5199          * In case we are in XGMI hive mode, reorder the device list
5200          * so that adev is in the first position.
5201          */
5202         INIT_LIST_HEAD(&device_list);
5203         if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5204                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5205                         list_add_tail(&tmp_adev->reset_list, &device_list);
5206                         if (gpu_reset_for_dev_remove && adev->shutdown)
5207                                 tmp_adev->shutdown = true;
5208                 }
5209                 if (!list_is_first(&adev->reset_list, &device_list))
5210                         list_rotate_to_front(&adev->reset_list, &device_list);
5211                 device_list_handle = &device_list;
5212         } else {
5213                 list_add_tail(&adev->reset_list, &device_list);
5214                 device_list_handle = &device_list;
5215         }
5216
5217         /* We need to lock the reset domain only once, for both XGMI and single-device cases */
5218         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5219                                     reset_list);
5220         amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5221
5222         /* block all schedulers and reset given job's ring */
5223         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5224
5225                 amdgpu_device_set_mp1_state(tmp_adev);
5226
5227                 /*
5228                  * Try to put the audio codec into suspend state
5229                  * before the gpu reset starts.
5230                  *
5231                  * The power domain of the graphics device is
5232                  * shared with the AZ power domain. Without this,
5233                  * we may change the audio hardware from behind
5234                  * the audio driver's back and trigger
5235                  * audio codec errors.
5236                  */
5237                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5238                         audio_suspended = true;
5239
5240                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5241
5242                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5243
5244                 if (!amdgpu_sriov_vf(tmp_adev))
5245                         amdgpu_amdkfd_pre_reset(tmp_adev);
5246
5247                 /*
5248                  * Mark these ASICs to be reset as untracked first,
5249                  * and add them back after the reset completes.
5250                  */
5251                 amdgpu_unregister_gpu_instance(tmp_adev);
5252
5253                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5254
5255                 /* disable ras on ALL IPs */
5256                 if (!need_emergency_restart &&
5257                       amdgpu_device_ip_need_full_reset(tmp_adev))
5258                         amdgpu_ras_suspend(tmp_adev);
5259
5260                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5261                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5262
5263                         if (!ring || !ring->sched.thread)
5264                                 continue;
5265
5266                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5267
5268                         if (need_emergency_restart)
5269                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5270                 }
5271                 atomic_inc(&tmp_adev->gpu_reset_counter);
5272         }
5273
5274         if (need_emergency_restart)
5275                 goto skip_sched_resume;
5276
5277         /*
5278          * Must check guilty signal here since after this point all old
5279          * HW fences are force signaled.
5280          *
5281          * job->base holds a reference to parent fence
5282          */
5283         if (job && dma_fence_is_signaled(&job->hw_fence)) {
5284                 job_signaled = true;
5285                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5286                 goto skip_hw_reset;
5287         }
5288
5289 retry:  /* Rest of adevs pre asic reset from XGMI hive. */
5290         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5291                 if (gpu_reset_for_dev_remove) {
5292                         /* Workaround for ASICs that need to disable SMC first */
5293                         amdgpu_device_smu_fini_early(tmp_adev);
5294                 }
5295                 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5296                 /* TODO: Should we stop? */
5297                 if (r) {
5298                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5299                                   r, adev_to_drm(tmp_adev)->unique);
5300                         tmp_adev->asic_reset_res = r;
5301                 }
5302
5303                 /*
5304                  * Drop all pending non-scheduler resets. Scheduler resets
5305                  * were already dropped during drm_sched_stop.
5306                  */
5307                 amdgpu_device_stop_pending_resets(tmp_adev);
5308         }
5309
5310         /* Actual ASIC resets if needed.*/
5311         /* Host driver will handle XGMI hive reset for SRIOV */
5312         if (amdgpu_sriov_vf(adev)) {
5313                 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5314                 if (r)
5315                         adev->asic_reset_res = r;
5316
5317                 /* Aldebaran supports RAS in SRIOV, so we need to resume RAS during reset */
5318                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
5319                         amdgpu_ras_resume(adev);
5320         } else {
5321                 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5322                 if (r == -EAGAIN)
5323                         goto retry;
5324
5325                 if (!r && gpu_reset_for_dev_remove)
5326                         goto recover_end;
5327         }
5328
5329 skip_hw_reset:
5330
5331         /* Post ASIC reset for all devs. */
5332         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5333
5334                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5335                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5336
5337                         if (!ring || !ring->sched.thread)
5338                                 continue;
5339
5340                         drm_sched_start(&ring->sched, true);
5341                 }
5342
5343                 if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
5344                         amdgpu_mes_self_test(tmp_adev);
5345
5346                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5347                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5348                 }
5349
5350                 if (tmp_adev->asic_reset_res)
5351                         r = tmp_adev->asic_reset_res;
5352
5353                 tmp_adev->asic_reset_res = 0;
5354
5355                 if (r) {
5356                         /* bad news, how do we tell it to userspace? */
5357                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5358                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5359                 } else {
5360                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5361                         if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5362                                 DRM_WARN("smart shift update failed\n");
5363                 }
5364         }
5365
5366 skip_sched_resume:
5367         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5368                 /* unlock kfd: SRIOV would do it separately */
5369                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5370                         amdgpu_amdkfd_post_reset(tmp_adev);
5371
5372                 /* kfd_post_reset will do nothing if the kfd device is not initialized;
5373                  * we need to bring up kfd here if it was not initialized before.
5374                  */
5375                 if (!adev->kfd.init_complete)
5376                         amdgpu_amdkfd_device_init(adev);
5377
5378                 if (audio_suspended)
5379                         amdgpu_device_resume_display_audio(tmp_adev);
5380
5381                 amdgpu_device_unset_mp1_state(tmp_adev);
5382
5383                 amdgpu_ras_set_error_query_ready(tmp_adev, true);
5384         }
5385
5386 recover_end:
5387         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5388                                             reset_list);
5389         amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5390
5391         if (hive) {
5392                 mutex_unlock(&hive->hive_lock);
5393                 amdgpu_put_xgmi_hive(hive);
5394         }
5395
5396         if (r)
5397                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5398
5399         atomic_set(&adev->reset_domain->reset_res, r);
5400         return r;
5401 }
5402
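/*
 * Illustrative sketch only, not part of the driver: a typical caller such as
 * a job timeout handler is expected to gate on amdgpu_device_should_recover_gpu(),
 * fill a reset context and then hand the guilty job to
 * amdgpu_device_gpu_recover().  Only fields consumed in this file are set
 * below; the function name is hypothetical and real callers may set more.
 */
static int __maybe_unused example_handle_job_timeout(struct amdgpu_device *adev,
                                                     struct amdgpu_job *job)
{
        struct amdgpu_reset_context reset_context;

        if (!amdgpu_device_should_recover_gpu(adev))
                return 0;

        memset(&reset_context, 0, sizeof(reset_context));
        reset_context.reset_req_dev = adev;
        clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

        return amdgpu_device_gpu_recover(adev, job, &reset_context);
}
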
5403 /**
5404  * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5405  *
5406  * @adev: amdgpu_device pointer
5407  *
5408  * Fetches and stores in the driver the PCIE capabilities (gen speed
5409  * and lanes) of the slot the device is in. Handles APUs and
5410  * virtualized environments where PCIE config space may not be available.
5411  */
5412 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5413 {
5414         struct pci_dev *pdev;
5415         enum pci_bus_speed speed_cap, platform_speed_cap;
5416         enum pcie_link_width platform_link_width;
5417
5418         if (amdgpu_pcie_gen_cap)
5419                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5420
5421         if (amdgpu_pcie_lane_cap)
5422                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5423
5424         /* covers APUs as well */
5425         if (pci_is_root_bus(adev->pdev->bus)) {
5426                 if (adev->pm.pcie_gen_mask == 0)
5427                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5428                 if (adev->pm.pcie_mlw_mask == 0)
5429                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5430                 return;
5431         }
5432
5433         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5434                 return;
5435
5436         pcie_bandwidth_available(adev->pdev, NULL,
5437                                  &platform_speed_cap, &platform_link_width);
5438
5439         if (adev->pm.pcie_gen_mask == 0) {
5440                 /* asic caps */
5441                 pdev = adev->pdev;
5442                 speed_cap = pcie_get_speed_cap(pdev);
5443                 if (speed_cap == PCI_SPEED_UNKNOWN) {
5444                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5445                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5446                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5447                 } else {
5448                         if (speed_cap == PCIE_SPEED_32_0GT)
5449                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5450                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5451                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5452                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5453                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5454                         else if (speed_cap == PCIE_SPEED_16_0GT)
5455                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5456                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5457                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5458                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5459                         else if (speed_cap == PCIE_SPEED_8_0GT)
5460                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5461                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5462                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5463                         else if (speed_cap == PCIE_SPEED_5_0GT)
5464                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5465                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5466                         else
5467                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5468                 }
5469                 /* platform caps */
5470                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5471                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5472                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5473                 } else {
5474                         if (platform_speed_cap == PCIE_SPEED_32_0GT)
5475                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5476                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5477                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5478                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5479                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5480                         else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5481                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5482                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5483                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5484                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5485                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5486                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5487                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5488                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5489                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5490                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5491                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5492                         else
5493                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5494
5495                 }
5496         }
5497         if (adev->pm.pcie_mlw_mask == 0) {
5498                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5499                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5500                 } else {
5501                         switch (platform_link_width) {
5502                         case PCIE_LNK_X32:
5503                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5504                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5505                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5506                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5507                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5508                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5509                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5510                                 break;
5511                         case PCIE_LNK_X16:
5512                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5513                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5514                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5515                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5516                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5517                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5518                                 break;
5519                         case PCIE_LNK_X12:
5520                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5521                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5522                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5523                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5524                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5525                                 break;
5526                         case PCIE_LNK_X8:
5527                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5528                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5529                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5530                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5531                                 break;
5532                         case PCIE_LNK_X4:
5533                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5534                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5535                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5536                                 break;
5537                         case PCIE_LNK_X2:
5538                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5539                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5540                                 break;
5541                         case PCIE_LNK_X1:
5542                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5543                                 break;
5544                         default:
5545                                 break;
5546                         }
5547                 }
5548         }
5549 }
5550
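/*
 * Illustrative sketch only, not part of the driver: consumers of the masks
 * filled in above (e.g. the power-management code) typically just test for
 * the link speeds/widths they care about.  A hypothetical check for platform
 * PCIe Gen4 support would look like this:
 */
static bool __maybe_unused example_platform_supports_pcie_gen4(struct amdgpu_device *adev)
{
        return !!(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
}
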
5551 /**
5552  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5553  *
5554  * @adev: amdgpu_device pointer
5555  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5556  *
5557  * Return true if @peer_adev can access (DMA) @adev through the PCIe
5558  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5559  * @peer_adev.
5560  */
5561 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5562                                       struct amdgpu_device *peer_adev)
5563 {
5564 #ifdef CONFIG_HSA_AMD_P2P
5565         uint64_t address_mask = peer_adev->dev->dma_mask ?
5566                 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5567         resource_size_t aper_limit =
5568                 adev->gmc.aper_base + adev->gmc.aper_size - 1;
5569         bool p2p_access =
5570                 !adev->gmc.xgmi.connected_to_cpu &&
5571                 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5572
5573         return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5574                 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5575                 !(adev->gmc.aper_base & address_mask ||
5576                   aper_limit & address_mask));
5577 #else
5578         return false;
5579 #endif
5580 }
5581
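/**
 * amdgpu_device_baco_enter - enter the BACO state
 *
 * @dev: drm_device pointer
 *
 * Puts the device into BACO. If RAS is enabled, doorbell interrupts are
 * disabled first for the duration of BACO. Returns -ENOTSUPP if the device
 * does not support BACO, otherwise the result of amdgpu_dpm_baco_enter().
 */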
5582 int amdgpu_device_baco_enter(struct drm_device *dev)
5583 {
5584         struct amdgpu_device *adev = drm_to_adev(dev);
5585         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5586
5587         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5588                 return -ENOTSUPP;
5589
5590         if (ras && adev->ras_enabled &&
5591             adev->nbio.funcs->enable_doorbell_interrupt)
5592                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5593
5594         return amdgpu_dpm_baco_enter(adev);
5595 }
5596
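/**
 * amdgpu_device_baco_exit - exit the BACO state
 *
 * @dev: drm_device pointer
 *
 * Brings the device back out of BACO, re-enabling doorbell interrupts if RAS
 * is enabled and clearing the doorbell interrupt when running in passthrough
 * mode. Returns -ENOTSUPP if the device does not support BACO, otherwise 0 on
 * success or the error returned by amdgpu_dpm_baco_exit().
 */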
5597 int amdgpu_device_baco_exit(struct drm_device *dev)
5598 {
5599         struct amdgpu_device *adev = drm_to_adev(dev);
5600         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5601         int ret = 0;
5602
5603         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5604                 return -ENOTSUPP;
5605
5606         ret = amdgpu_dpm_baco_exit(adev);
5607         if (ret)
5608                 return ret;
5609
5610         if (ras && adev->ras_enabled &&
5611             adev->nbio.funcs->enable_doorbell_interrupt)
5612                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5613
5614         if (amdgpu_passthrough(adev) &&
5615             adev->nbio.funcs->clear_doorbell_interrupt)
5616                 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5617
5618         return 0;
5619 }
5620
5621 /**
5622  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5623  * @pdev: PCI device struct
5624  * @state: PCI channel state
5625  *
5626  * Description: Called when a PCI error is detected.
5627  *
5628  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5629  */
5630 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5631 {
5632         struct drm_device *dev = pci_get_drvdata(pdev);
5633         struct amdgpu_device *adev = drm_to_adev(dev);
5634         int i;
5635
5636         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5637
5638         if (adev->gmc.xgmi.num_physical_nodes > 1) {
5639                 DRM_WARN("No support for XGMI hive yet...");
5640                 return PCI_ERS_RESULT_DISCONNECT;
5641         }
5642
5643         adev->pci_channel_state = state;
5644
5645         switch (state) {
5646         case pci_channel_io_normal:
5647                 return PCI_ERS_RESULT_CAN_RECOVER;
5648         /* Fatal error, prepare for slot reset */
5649         case pci_channel_io_frozen:
5650                 /*
5651                  * Locking adev->reset_domain->sem will prevent any external access
5652                  * to GPU during PCI error recovery
5653                  */
5654                 amdgpu_device_lock_reset_domain(adev->reset_domain);
5655                 amdgpu_device_set_mp1_state(adev);
5656
5657                 /*
5658                  * Block any work scheduling as we do for regular GPU reset
5659                  * for the duration of the recovery
5660                  */
5661                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5662                         struct amdgpu_ring *ring = adev->rings[i];
5663
5664                         if (!ring || !ring->sched.thread)
5665                                 continue;
5666
5667                         drm_sched_stop(&ring->sched, NULL);
5668                 }
5669                 atomic_inc(&adev->gpu_reset_counter);
5670                 return PCI_ERS_RESULT_NEED_RESET;
5671         case pci_channel_io_perm_failure:
5672                 /* Permanent error, prepare for device removal */
5673                 return PCI_ERS_RESULT_DISCONNECT;
5674         }
5675
5676         return PCI_ERS_RESULT_NEED_RESET;
5677 }
5678
5679 /**
5680  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5681  * @pdev: pointer to PCI device
5682  */
5683 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5684 {
5685
5686         DRM_INFO("PCI error: mmio enabled callback!!\n");
5687
5688         /* TODO - dump whatever for debugging purposes */
5689
5690         /* This is called only if amdgpu_pci_error_detected returns
5691          * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5692          * works, no need to reset slot.
5693          */
5694
5695         return PCI_ERS_RESULT_RECOVERED;
5696 }
5697
5698 /**
5699  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5700  * @pdev: PCI device struct
5701  *
5702  * Description: This routine is called by the PCI error recovery
5703  * code after the PCI slot has been reset, just before we
5704  * should resume normal operations.
5705  */
5706 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5707 {
5708         struct drm_device *dev = pci_get_drvdata(pdev);
5709         struct amdgpu_device *adev = drm_to_adev(dev);
5710         int r, i;
5711         struct amdgpu_reset_context reset_context;
5712         u32 memsize;
5713         struct list_head device_list;
5714
5715         DRM_INFO("PCI error: slot reset callback!!\n");
5716
5717         memset(&reset_context, 0, sizeof(reset_context));
5718
5719         INIT_LIST_HEAD(&device_list);
5720         list_add_tail(&adev->reset_list, &device_list);
5721
5722         /* wait for asic to come out of reset */
5723         msleep(500);
5724
5725                 /* Restore PCI config space */
5726                 amdgpu_device_load_pci_state(pdev);
5727
5728                 /* confirm ASIC came out of reset */
5729         for (i = 0; i < adev->usec_timeout; i++) {
5730                 memsize = amdgpu_asic_get_config_memsize(adev);
5731
5732                 if (memsize != 0xffffffff)
5733                         break;
5734                 udelay(1);
5735         }
5736         if (memsize == 0xffffffff) {
5737                 r = -ETIME;
5738                 goto out;
5739         }
5740
5741         reset_context.method = AMD_RESET_METHOD_NONE;
5742         reset_context.reset_req_dev = adev;
5743         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5744         set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5745
5746         adev->no_hw_access = true;
5747         r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5748         adev->no_hw_access = false;
5749         if (r)
5750                 goto out;
5751
5752         r = amdgpu_do_asic_reset(&device_list, &reset_context);
5753
5754 out:
5755         if (!r) {
5756                 if (amdgpu_device_cache_pci_state(adev->pdev))
5757                         pci_restore_state(adev->pdev);
5758
5759                 DRM_INFO("PCIe error recovery succeeded\n");
5760         } else {
5761                 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5762                 amdgpu_device_unset_mp1_state(adev);
5763                 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5764         }
5765
5766         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5767 }
5768
5769 /**
5770  * amdgpu_pci_resume() - resume normal ops after PCI reset
5771  * @pdev: pointer to PCI device
5772  *
5773  * Called when the error recovery driver tells us that it's
5774  * OK to resume normal operation.
5775  */
5776 void amdgpu_pci_resume(struct pci_dev *pdev)
5777 {
5778         struct drm_device *dev = pci_get_drvdata(pdev);
5779         struct amdgpu_device *adev = drm_to_adev(dev);
5780         int i;
5781
5782
5783         DRM_INFO("PCI error: resume callback!!\n");
5784
5785         /* Only continue execution for the case of pci_channel_io_frozen */
5786         if (adev->pci_channel_state != pci_channel_io_frozen)
5787                 return;
5788
5789         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5790                 struct amdgpu_ring *ring = adev->rings[i];
5791
5792                 if (!ring || !ring->sched.thread)
5793                         continue;
5794
5795                 drm_sched_start(&ring->sched, true);
5796         }
5797
5798         amdgpu_device_unset_mp1_state(adev);
5799         amdgpu_device_unlock_reset_domain(adev->reset_domain);
5800 }
5801
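/**
 * amdgpu_device_cache_pci_state - save the PCI config space of the device
 *
 * @pdev: PCI device struct
 *
 * Saves the current PCI config space into adev->pci_state so that it can be
 * restored later, e.g. after a PCIe slot reset. Returns true on success,
 * false if the state could not be saved or stored.
 */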
5802 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5803 {
5804         struct drm_device *dev = pci_get_drvdata(pdev);
5805         struct amdgpu_device *adev = drm_to_adev(dev);
5806         int r;
5807
5808         r = pci_save_state(pdev);
5809         if (!r) {
5810                 kfree(adev->pci_state);
5811
5812                 adev->pci_state = pci_store_saved_state(pdev);
5813
5814                 if (!adev->pci_state) {
5815                         DRM_ERROR("Failed to store PCI saved state");
5816                         return false;
5817                 }
5818         } else {
5819                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5820                 return false;
5821         }
5822
5823         return true;
5824 }
5825
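/**
 * amdgpu_device_load_pci_state - restore the cached PCI config space
 *
 * @pdev: PCI device struct
 *
 * Loads the PCI config space previously saved by
 * amdgpu_device_cache_pci_state() and restores it to the device.
 * Returns true on success, false if no state was cached or loading failed.
 */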
5826 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5827 {
5828         struct drm_device *dev = pci_get_drvdata(pdev);
5829         struct amdgpu_device *adev = drm_to_adev(dev);
5830         int r;
5831
5832         if (!adev->pci_state)
5833                 return false;
5834
5835         r = pci_load_saved_state(pdev, adev->pci_state);
5836
5837         if (!r) {
5838                 pci_restore_state(pdev);
5839         } else {
5840                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5841                 return false;
5842         }
5843
5844         return true;
5845 }
5846
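/**
 * amdgpu_device_flush_hdp - flush the HDP (Host Data Path) cache
 *
 * @adev: amdgpu_device pointer
 * @ring: ring to emit the flush on, or NULL to use the ASIC-level flush
 *
 * The flush is skipped for bare-metal APUs and for ASICs whose memory is
 * directly connected to the CPU. If @ring implements emit_hdp_flush, the
 * flush is emitted on the ring, otherwise the ASIC MMIO flush is used.
 */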
5847 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5848                 struct amdgpu_ring *ring)
5849 {
5850 #ifdef CONFIG_X86_64
5851         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5852                 return;
5853 #endif
5854         if (adev->gmc.xgmi.connected_to_cpu)
5855                 return;
5856
5857         if (ring && ring->funcs->emit_hdp_flush)
5858                 amdgpu_ring_emit_hdp_flush(ring);
5859         else
5860                 amdgpu_asic_flush_hdp(adev, ring);
5861 }
5862
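/**
 * amdgpu_device_invalidate_hdp - invalidate the HDP (Host Data Path) cache
 *
 * @adev: amdgpu_device pointer
 * @ring: ring the invalidation is associated with (may be NULL)
 *
 * As with the flush, the invalidation is skipped for bare-metal APUs and for
 * ASICs whose memory is directly connected to the CPU.
 */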
5863 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5864                 struct amdgpu_ring *ring)
5865 {
5866 #ifdef CONFIG_X86_64
5867         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5868                 return;
5869 #endif
5870         if (adev->gmc.xgmi.connected_to_cpu)
5871                 return;
5872
5873         amdgpu_asic_invalidate_hdp(adev, ring);
5874 }
5875
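/**
 * amdgpu_in_reset - check whether a GPU reset is in progress
 *
 * @adev: amdgpu_device pointer
 *
 * Returns a non-zero value while a reset of the device's reset domain is in
 * progress.
 */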
5876 int amdgpu_in_reset(struct amdgpu_device *adev)
5877 {
5878         return atomic_read(&adev->reset_domain->in_gpu_reset);
5879 }
5880
5881 /**
5882  * amdgpu_device_halt() - bring hardware to some kind of halt state
5883  *
5884  * @adev: amdgpu_device pointer
5885  *
5886  * Bring hardware to some kind of halt state so that no one can touch it
5887  * anymore. It helps to maintain the error context when an error occurs.
5888  * Compared to a simple hang, the system will remain stable, at least for
5889  * SSH access. It should then be trivial to inspect the hardware state and
5890  * see what's going on. Implemented as follows:
5891  *
5892  * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc.),
5893  *    clears all CPU mappings to the device and disallows remappings through page faults
5894  * 2. amdgpu_irq_disable_all() disables all interrupts
5895  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5896  * 4. set adev->no_hw_access to avoid potential crashes after step 5
5897  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5898  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5899  *    flush any in flight DMA operations
5900  */
5901 void amdgpu_device_halt(struct amdgpu_device *adev)
5902 {
5903         struct pci_dev *pdev = adev->pdev;
5904         struct drm_device *ddev = adev_to_drm(adev);
5905
5906         drm_dev_unplug(ddev);
5907
5908         amdgpu_irq_disable_all(adev);
5909
5910         amdgpu_fence_driver_hw_fini(adev);
5911
5912         adev->no_hw_access = true;
5913
5914         amdgpu_device_unmap_mmio(adev);
5915
5916         pci_disable_device(pdev);
5917         pci_wait_for_pending_transaction(pdev);
5918 }
5919
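/**
 * amdgpu_device_pcie_port_rreg - read a PCIe port register
 *
 * @adev: amdgpu_device pointer
 * @reg: register index
 *
 * Reads an indirect PCIe port register through the NBIO index/data pair:
 * the register address (@reg * 4) is written to the index offset and the
 * value is read back from the data offset, under the pcie_idx_lock spinlock.
 */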
5920 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5921                                 u32 reg)
5922 {
5923         unsigned long flags, address, data;
5924         u32 r;
5925
5926         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5927         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5928
5929         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5930         WREG32(address, reg * 4);
5931         (void)RREG32(address);
5932         r = RREG32(data);
5933         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5934         return r;
5935 }
5936
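/**
 * amdgpu_device_pcie_port_wreg - write a PCIe port register
 *
 * @adev: amdgpu_device pointer
 * @reg: register index
 * @v: value to write
 *
 * Writes an indirect PCIe port register through the NBIO index/data pair,
 * mirroring amdgpu_device_pcie_port_rreg(), under the pcie_idx_lock spinlock.
 */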
5937 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
5938                                 u32 reg, u32 v)
5939 {
5940         unsigned long flags, address, data;
5941
5942         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5943         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5944
5945         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5946         WREG32(address, reg * 4);
5947         (void)RREG32(address);
5948         WREG32(data, v);
5949         (void)RREG32(data);
5950         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5951 }
5952
5953 /**
5954  * amdgpu_device_switch_gang - switch to a new gang
5955  * @adev: amdgpu_device pointer
5956  * @gang: the gang to switch to
5957  *
5958  * Try to switch to a new gang.
5959  * Returns: NULL if we switched to the new gang or a reference to the current
5960  * gang leader.
5961  */
5962 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
5963                                             struct dma_fence *gang)
5964 {
5965         struct dma_fence *old = NULL;
5966
5967         do {
5968                 dma_fence_put(old);
5969                 rcu_read_lock();
5970                 old = dma_fence_get_rcu_safe(&adev->gang_submit);
5971                 rcu_read_unlock();
5972
5973                 if (old == gang)
5974                         break;
5975
5976                 if (!dma_fence_is_signaled(old))
5977                         return old;
5978
5979         } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
5980                          old, gang) != old);
5981
5982         dma_fence_put(old);
5983         return NULL;
5984 }
5985
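/**
 * amdgpu_device_has_display_hardware - check whether the ASIC has a display block
 *
 * @adev: amdgpu_device pointer
 *
 * Returns false for chips known to have no display hardware and, for
 * IP-discovery based ASICs, when no DCE IP version was discovered or the
 * DMU block has been harvested. Returns true otherwise.
 */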
5986 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
5987 {
5988         switch (adev->asic_type) {
5989 #ifdef CONFIG_DRM_AMDGPU_SI
5990         case CHIP_HAINAN:
5991 #endif
5992         case CHIP_TOPAZ:
5993                 /* chips with no display hardware */
5994                 return false;
5995 #ifdef CONFIG_DRM_AMDGPU_SI
5996         case CHIP_TAHITI:
5997         case CHIP_PITCAIRN:
5998         case CHIP_VERDE:
5999         case CHIP_OLAND:
6000 #endif
6001 #ifdef CONFIG_DRM_AMDGPU_CIK
6002         case CHIP_BONAIRE:
6003         case CHIP_HAWAII:
6004         case CHIP_KAVERI:
6005         case CHIP_KABINI:
6006         case CHIP_MULLINS:
6007 #endif
6008         case CHIP_TONGA:
6009         case CHIP_FIJI:
6010         case CHIP_POLARIS10:
6011         case CHIP_POLARIS11:
6012         case CHIP_POLARIS12:
6013         case CHIP_VEGAM:
6014         case CHIP_CARRIZO:
6015         case CHIP_STONEY:
6016                 /* chips with display hardware */
6017                 return true;
6018         default:
6019                 /* IP discovery */
6020                 if (!adev->ip_versions[DCE_HWIP][0] ||
6021                     (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6022                         return false;
6023                 return true;
6024         }
6025 }