1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
35 #include <linux/devcoredump.h>
36 #include <generated/utsrelease.h>
37 #include <linux/pci-p2pdma.h>
38
39 #include <drm/drm_aperture.h>
40 #include <drm/drm_atomic_helper.h>
41 #include <drm/drm_crtc_helper.h>
42 #include <drm/drm_fb_helper.h>
43 #include <drm/drm_probe_helper.h>
44 #include <drm/amdgpu_drm.h>
45 #include <linux/vgaarb.h>
46 #include <linux/vga_switcheroo.h>
47 #include <linux/efi.h>
48 #include "amdgpu.h"
49 #include "amdgpu_trace.h"
50 #include "amdgpu_i2c.h"
51 #include "atom.h"
52 #include "amdgpu_atombios.h"
53 #include "amdgpu_atomfirmware.h"
54 #include "amd_pcie.h"
55 #ifdef CONFIG_DRM_AMDGPU_SI
56 #include "si.h"
57 #endif
58 #ifdef CONFIG_DRM_AMDGPU_CIK
59 #include "cik.h"
60 #endif
61 #include "vi.h"
62 #include "soc15.h"
63 #include "nv.h"
64 #include "bif/bif_4_1_d.h"
65 #include <linux/firmware.h>
66 #include "amdgpu_vf_error.h"
67
68 #include "amdgpu_amdkfd.h"
69 #include "amdgpu_pm.h"
70
71 #include "amdgpu_xgmi.h"
72 #include "amdgpu_ras.h"
73 #include "amdgpu_pmu.h"
74 #include "amdgpu_fru_eeprom.h"
75 #include "amdgpu_reset.h"
76
77 #include <linux/suspend.h>
78 #include <drm/task_barrier.h>
79 #include <linux/pm_runtime.h>
80
81 #include <drm/drm_drv.h>
82
83 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
87 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
88 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
89 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
90
91 #define AMDGPU_RESUME_MS                2000
92 #define AMDGPU_MAX_RETRY_LIMIT          2
93 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
94
95 static const struct drm_driver amdgpu_kms_driver;
96
97 const char *amdgpu_asic_name[] = {
98         "TAHITI",
99         "PITCAIRN",
100         "VERDE",
101         "OLAND",
102         "HAINAN",
103         "BONAIRE",
104         "KAVERI",
105         "KABINI",
106         "HAWAII",
107         "MULLINS",
108         "TOPAZ",
109         "TONGA",
110         "FIJI",
111         "CARRIZO",
112         "STONEY",
113         "POLARIS10",
114         "POLARIS11",
115         "POLARIS12",
116         "VEGAM",
117         "VEGA10",
118         "VEGA12",
119         "VEGA20",
120         "RAVEN",
121         "ARCTURUS",
122         "RENOIR",
123         "ALDEBARAN",
124         "NAVI10",
125         "CYAN_SKILLFISH",
126         "NAVI14",
127         "NAVI12",
128         "SIENNA_CICHLID",
129         "NAVY_FLOUNDER",
130         "VANGOGH",
131         "DIMGREY_CAVEFISH",
132         "BEIGE_GOBY",
133         "YELLOW_CARP",
134         "IP DISCOVERY",
135         "LAST",
136 };
137
138 /**
139  * DOC: pcie_replay_count
140  *
141  * The amdgpu driver provides a sysfs API for reporting the total number
142  * of PCIe replays (NAKs).
143  * The file pcie_replay_count is used for this and returns the total
144  * number of replays as the sum of NAKs generated and NAKs received.
145  */
146
147 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
148                 struct device_attribute *attr, char *buf)
149 {
150         struct drm_device *ddev = dev_get_drvdata(dev);
151         struct amdgpu_device *adev = drm_to_adev(ddev);
152         uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
153
154         return sysfs_emit(buf, "%llu\n", cnt);
155 }
156
157 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
158                 amdgpu_device_get_pcie_replay_count, NULL);
159
160 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
161
162 /**
163  * DOC: product_name
164  *
165  * The amdgpu driver provides a sysfs API for reporting the product name
166  * for the device.
167  * The file product_name is used for this and returns the product name
168  * as returned from the FRU.
169  * NOTE: This is only available for certain server cards
170  */
171
172 static ssize_t amdgpu_device_get_product_name(struct device *dev,
173                 struct device_attribute *attr, char *buf)
174 {
175         struct drm_device *ddev = dev_get_drvdata(dev);
176         struct amdgpu_device *adev = drm_to_adev(ddev);
177
178         return sysfs_emit(buf, "%s\n", adev->product_name);
179 }
180
181 static DEVICE_ATTR(product_name, S_IRUGO,
182                 amdgpu_device_get_product_name, NULL);
183
184 /**
185  * DOC: product_number
186  *
187  * The amdgpu driver provides a sysfs API for reporting the part number
188  * for the device.
189  * The file product_number is used for this and returns the part number
190  * as returned from the FRU.
191  * NOTE: This is only available for certain server cards
192  */
193
194 static ssize_t amdgpu_device_get_product_number(struct device *dev,
195                 struct device_attribute *attr, char *buf)
196 {
197         struct drm_device *ddev = dev_get_drvdata(dev);
198         struct amdgpu_device *adev = drm_to_adev(ddev);
199
200         return sysfs_emit(buf, "%s\n", adev->product_number);
201 }
202
203 static DEVICE_ATTR(product_number, S_IRUGO,
204                 amdgpu_device_get_product_number, NULL);
205
206 /**
207  * DOC: serial_number
208  *
209  * The amdgpu driver provides a sysfs API for reporting the serial number
210  * for the device.
211  * The file serial_number is used for this and returns the serial number
212  * as returned from the FRU.
213  * NOTE: This is only available for certain server cards
214  */
215
216 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
217                 struct device_attribute *attr, char *buf)
218 {
219         struct drm_device *ddev = dev_get_drvdata(dev);
220         struct amdgpu_device *adev = drm_to_adev(ddev);
221
222         return sysfs_emit(buf, "%s\n", adev->serial);
223 }
224
225 static DEVICE_ATTR(serial_number, S_IRUGO,
226                 amdgpu_device_get_serial_number, NULL);
227
228 /**
229  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
230  *
231  * @dev: drm_device pointer
232  *
233  * Returns true if the device is a dGPU with ATPX power control,
234  * otherwise returns false.
235  */
236 bool amdgpu_device_supports_px(struct drm_device *dev)
237 {
238         struct amdgpu_device *adev = drm_to_adev(dev);
239
240         if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
241                 return true;
242         return false;
243 }
244
245 /**
246  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
247  *
248  * @dev: drm_device pointer
249  *
250  * Returns true if the device is a dGPU with ACPI power control,
251  * otherwise returns false.
252  */
253 bool amdgpu_device_supports_boco(struct drm_device *dev)
254 {
255         struct amdgpu_device *adev = drm_to_adev(dev);
256
257         if (adev->has_pr3 ||
258             ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
259                 return true;
260         return false;
261 }
262
263 /**
264  * amdgpu_device_supports_baco - Does the device support BACO
265  *
266  * @dev: drm_device pointer
267  *
268  * Returns true if the device supports BACO,
269  * otherwise returns false.
270  */
271 bool amdgpu_device_supports_baco(struct drm_device *dev)
272 {
273         struct amdgpu_device *adev = drm_to_adev(dev);
274
275         return amdgpu_asic_supports_baco(adev);
276 }
277
278 /**
279  * amdgpu_device_supports_smart_shift - Is the device a dGPU with
280  * smart shift support
281  *
282  * @dev: drm_device pointer
283  *
284  * Returns true if the device is a dGPU with Smart Shift support,
285  * otherwise returns false.
286  */
287 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
288 {
289         return (amdgpu_device_supports_boco(dev) &&
290                 amdgpu_acpi_is_power_shift_control_supported());
291 }
292
293 /*
294  * VRAM access helper functions
295  */
296
297 /**
298  * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
299  *
300  * @adev: amdgpu_device pointer
301  * @pos: offset of the buffer in vram
302  * @buf: virtual address of the buffer in system memory
303  * @size: read/write size; the buffer at @buf must be at least @size bytes
304  * @write: true - write to vram, otherwise - read from vram
305  */
306 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
307                              void *buf, size_t size, bool write)
308 {
309         unsigned long flags;
310         uint32_t hi = ~0, tmp = 0;
311         uint32_t *data = buf;
312         uint64_t last;
313         int idx;
314
315         if (!drm_dev_enter(adev_to_drm(adev), &idx))
316                 return;
317
318         BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
319
320         spin_lock_irqsave(&adev->mmio_idx_lock, flags);
321         for (last = pos + size; pos < last; pos += 4) {
322                 tmp = pos >> 31;
323
324                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
325                 if (tmp != hi) {
326                         WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
327                         hi = tmp;
328                 }
329                 if (write)
330                         WREG32_NO_KIQ(mmMM_DATA, *data++);
331                 else
332                         *data++ = RREG32_NO_KIQ(mmMM_DATA);
333         }
334
335         spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
336         drm_dev_exit(idx);
337 }
338
339 /**
340  * amdgpu_device_aper_access - access vram by vram aperture
341  *
342  * @adev: amdgpu_device pointer
343  * @pos: offset of the buffer in vram
344  * @buf: virtual address of the buffer in system memory
345  * @size: read/write size; the buffer at @buf must be at least @size bytes
346  * @write: true - write to vram, otherwise - read from vram
347  *
348  * Returns the number of bytes transferred.
349  */
350 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
351                                  void *buf, size_t size, bool write)
352 {
353 #ifdef CONFIG_64BIT
354         void __iomem *addr;
355         size_t count = 0;
356         uint64_t last;
357
358         if (!adev->mman.aper_base_kaddr)
359                 return 0;
360
361         last = min(pos + size, adev->gmc.visible_vram_size);
362         if (last > pos) {
363                 addr = adev->mman.aper_base_kaddr + pos;
364                 count = last - pos;
365
366                 if (write) {
367                         memcpy_toio(addr, buf, count);
368                         mb();
369                         amdgpu_device_flush_hdp(adev, NULL);
370                 } else {
371                         amdgpu_device_invalidate_hdp(adev, NULL);
372                         mb();
373                         memcpy_fromio(buf, addr, count);
374                 }
375
376         }
377
378         return count;
379 #else
380         return 0;
381 #endif
382 }
383
384 /**
385  * amdgpu_device_vram_access - read/write a buffer in vram
386  *
387  * @adev: amdgpu_device pointer
388  * @pos: offset of the buffer in vram
389  * @buf: virtual address of the buffer in system memory
390  * @size: read/write size; the buffer at @buf must be at least @size bytes
391  * @write: true - write to vram, otherwise - read from vram
392  */
393 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
394                                void *buf, size_t size, bool write)
395 {
396         size_t count;
397
398         /* try to use the VRAM aperture to access VRAM first */
399         count = amdgpu_device_aper_access(adev, pos, buf, size, write);
400         size -= count;
401         if (size) {
402                 /* use MM to access the rest of VRAM */
403                 pos += count;
404                 buf += count;
405                 amdgpu_device_mm_access(adev, pos, buf, size, write);
406         }
407 }
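
/*
 * Usage sketch (illustrative, not part of the driver): a caller that wants to
 * dump a few dwords of VRAM into a local buffer could do something like the
 * following, assuming a 4-byte aligned offset @vram_pos and size:
 *
 *	uint32_t data[4];
 *
 *	amdgpu_device_vram_access(adev, vram_pos, data, sizeof(data), false);
 *
 * The helper first tries the CPU-visible aperture and falls back to the
 * MM_INDEX/MM_DATA path for whatever lies outside visible VRAM.
 */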
408
409 /*
410  * register access helper functions.
411  */
412
413 /* Check if hw access should be skipped because of hotplug or device error */
414 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
415 {
416         if (adev->no_hw_access)
417                 return true;
418
419 #ifdef CONFIG_LOCKDEP
420         /*
421          * This is a bit complicated to understand, so worth a comment. What we assert
422          * here is that the GPU reset is not running on another thread in parallel.
423          *
424          * For this we trylock the read side of the reset semaphore, if that succeeds
425  * we know that the reset is not running in parallel.
426          *
427          * If the trylock fails we assert that we are either already holding the read
428          * side of the lock or are the reset thread itself and hold the write side of
429          * the lock.
430          */
431         if (in_task()) {
432                 if (down_read_trylock(&adev->reset_domain->sem))
433                         up_read(&adev->reset_domain->sem);
434                 else
435                         lockdep_assert_held(&adev->reset_domain->sem);
436         }
437 #endif
438         return false;
439 }
440
441 /**
442  * amdgpu_device_rreg - read a memory mapped IO or indirect register
443  *
444  * @adev: amdgpu_device pointer
445  * @reg: dword aligned register offset
446  * @acc_flags: access flags which require special behavior
447  *
448  * Returns the 32 bit value from the offset specified.
449  */
450 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
451                             uint32_t reg, uint32_t acc_flags)
452 {
453         uint32_t ret;
454
455         if (amdgpu_device_skip_hw_access(adev))
456                 return 0;
457
458         if ((reg * 4) < adev->rmmio_size) {
459                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
460                     amdgpu_sriov_runtime(adev) &&
461                     down_read_trylock(&adev->reset_domain->sem)) {
462                         ret = amdgpu_kiq_rreg(adev, reg);
463                         up_read(&adev->reset_domain->sem);
464                 } else {
465                         ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
466                 }
467         } else {
468                 ret = adev->pcie_rreg(adev, reg * 4);
469         }
470
471         trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
472
473         return ret;
474 }
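
/*
 * Illustrative note (macro names assumed from amdgpu.h): the rest of the
 * driver normally reaches this accessor through thin wrapper macros, e.g.
 *
 *	tmp = RREG32(reg);		- amdgpu_device_rreg(adev, reg, 0)
 *	tmp = RREG32_NO_KIQ(reg);	- pass AMDGPU_REGS_NO_KIQ to skip the KIQ path
 *
 * which keeps the SR-IOV/KIQ and reset-domain handling above in one place.
 */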
475
476 /*
477  * MMIO register read with byte-access helper functions
478  * @offset: byte offset from MMIO start
479  *
480 */
481
482 /**
483  * amdgpu_mm_rreg8 - read a memory mapped IO register
484  *
485  * @adev: amdgpu_device pointer
486  * @offset: byte aligned register offset
487  *
488  * Returns the 8 bit value from the offset specified.
489  */
490 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
491 {
492         if (amdgpu_device_skip_hw_access(adev))
493                 return 0;
494
495         if (offset < adev->rmmio_size)
496                 return (readb(adev->rmmio + offset));
497         BUG();
498 }
499
500 /*
501  * MMIO register write with byte-access helper functions
502  * @offset: byte offset from MMIO start
503  * @value: the value to be written to the register
504  *
505 */
506 /**
507  * amdgpu_mm_wreg8 - write a memory mapped IO register
508  *
509  * @adev: amdgpu_device pointer
510  * @offset: byte aligned register offset
511  * @value: 8 bit value to write
512  *
513  * Writes the value specified to the offset specified.
514  */
515 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
516 {
517         if (amdgpu_device_skip_hw_access(adev))
518                 return;
519
520         if (offset < adev->rmmio_size)
521                 writeb(value, adev->rmmio + offset);
522         else
523                 BUG();
524 }
525
526 /**
527  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
528  *
529  * @adev: amdgpu_device pointer
530  * @reg: dword aligned register offset
531  * @v: 32 bit value to write to the register
532  * @acc_flags: access flags which require special behavior
533  *
534  * Writes the value specified to the offset specified.
535  */
536 void amdgpu_device_wreg(struct amdgpu_device *adev,
537                         uint32_t reg, uint32_t v,
538                         uint32_t acc_flags)
539 {
540         if (amdgpu_device_skip_hw_access(adev))
541                 return;
542
543         if ((reg * 4) < adev->rmmio_size) {
544                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
545                     amdgpu_sriov_runtime(adev) &&
546                     down_read_trylock(&adev->reset_domain->sem)) {
547                         amdgpu_kiq_wreg(adev, reg, v);
548                         up_read(&adev->reset_domain->sem);
549                 } else {
550                         writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
551                 }
552         } else {
553                 adev->pcie_wreg(adev, reg * 4, v);
554         }
555
556         trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
557 }
558
559 /**
560  * amdgpu_mm_wreg_mmio_rlc - write a register either via direct/indirect MMIO or via the RLC path if in range
561  *
562  * @adev: amdgpu_device pointer
563  * @reg: mmio/rlc register
564  * @v: value to write
565  *
566  * This function is invoked only for debugfs register access.
567  */
568 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
569                              uint32_t reg, uint32_t v)
570 {
571         if (amdgpu_device_skip_hw_access(adev))
572                 return;
573
574         if (amdgpu_sriov_fullaccess(adev) &&
575             adev->gfx.rlc.funcs &&
576             adev->gfx.rlc.funcs->is_rlcg_access_range) {
577                 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
578                         return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
579         } else if ((reg * 4) >= adev->rmmio_size) {
580                 adev->pcie_wreg(adev, reg * 4, v);
581         } else {
582                 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
583         }
584 }
585
586 /**
587  * amdgpu_mm_rdoorbell - read a doorbell dword
588  *
589  * @adev: amdgpu_device pointer
590  * @index: doorbell index
591  *
592  * Returns the value in the doorbell aperture at the
593  * requested doorbell index (CIK).
594  */
595 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
596 {
597         if (amdgpu_device_skip_hw_access(adev))
598                 return 0;
599
600         if (index < adev->doorbell.num_doorbells) {
601                 return readl(adev->doorbell.ptr + index);
602         } else {
603                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
604                 return 0;
605         }
606 }
607
608 /**
609  * amdgpu_mm_wdoorbell - write a doorbell dword
610  *
611  * @adev: amdgpu_device pointer
612  * @index: doorbell index
613  * @v: value to write
614  *
615  * Writes @v to the doorbell aperture at the
616  * requested doorbell index (CIK).
617  */
618 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
619 {
620         if (amdgpu_device_skip_hw_access(adev))
621                 return;
622
623         if (index < adev->doorbell.num_doorbells) {
624                 writel(v, adev->doorbell.ptr + index);
625         } else {
626                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
627         }
628 }
629
630 /**
631  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
632  *
633  * @adev: amdgpu_device pointer
634  * @index: doorbell index
635  *
636  * Returns the value in the doorbell aperture at the
637  * requested doorbell index (VEGA10+).
638  */
639 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
640 {
641         if (amdgpu_device_skip_hw_access(adev))
642                 return 0;
643
644         if (index < adev->doorbell.num_doorbells) {
645                 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
646         } else {
647                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
648                 return 0;
649         }
650 }
651
652 /**
653  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
654  *
655  * @adev: amdgpu_device pointer
656  * @index: doorbell index
657  * @v: value to write
658  *
659  * Writes @v to the doorbell aperture at the
660  * requested doorbell index (VEGA10+).
661  */
662 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
663 {
664         if (amdgpu_device_skip_hw_access(adev))
665                 return;
666
667         if (index < adev->doorbell.num_doorbells) {
668                 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
669         } else {
670                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
671         }
672 }
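
/*
 * Usage sketch (illustrative, not part of the driver): ring code normally
 * reaches these helpers through the RDOORBELL/WDOORBELL convenience macros
 * (assumed to expand to the amdgpu_mm_rdoorbell/wdoorbell helpers above),
 * e.g. a ring backend committing its write pointer might do roughly:
 *
 *	if (ring->use_doorbell)
 *		WDOORBELL64(ring->doorbell_index, ring->wptr);
 */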
673
674 /**
675  * amdgpu_device_indirect_rreg - read an indirect register
676  *
677  * @adev: amdgpu_device pointer
678  * @pcie_index: mmio register offset of the index register
679  * @pcie_data: mmio register offset of the data register
680  * @reg_addr: indirect register address to read from
681  *
682  * Returns the value of indirect register @reg_addr
683  */
684 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
685                                 u32 pcie_index, u32 pcie_data,
686                                 u32 reg_addr)
687 {
688         unsigned long flags;
689         u32 r;
690         void __iomem *pcie_index_offset;
691         void __iomem *pcie_data_offset;
692
693         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
694         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
695         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
696
697         writel(reg_addr, pcie_index_offset);
698         readl(pcie_index_offset);
699         r = readl(pcie_data_offset);
700         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
701
702         return r;
703 }
704
705 /**
706  * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
707  *
708  * @adev: amdgpu_device pointer
709  * @pcie_index: mmio register offset of the index register
710  * @pcie_data: mmio register offset of the data register
711  * @reg_addr: indirect register address to read from
712  *
713  * Returns the value of indirect register @reg_addr
714  */
715 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
716                                   u32 pcie_index, u32 pcie_data,
717                                   u32 reg_addr)
718 {
719         unsigned long flags;
720         u64 r;
721         void __iomem *pcie_index_offset;
722         void __iomem *pcie_data_offset;
723
724         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
725         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
726         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
727
728         /* read low 32 bits */
729         writel(reg_addr, pcie_index_offset);
730         readl(pcie_index_offset);
731         r = readl(pcie_data_offset);
732         /* read high 32 bits */
733         writel(reg_addr + 4, pcie_index_offset);
734         readl(pcie_index_offset);
735         r |= ((u64)readl(pcie_data_offset) << 32);
736         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
737
738         return r;
739 }
740
741 /**
742  * amdgpu_device_indirect_wreg - write to an indirect register
743  *
744  * @adev: amdgpu_device pointer
745  * @pcie_index: mmio register offset of the index register
746  * @pcie_data: mmio register offset of the data register
747  * @reg_addr: indirect register offset
748  * @reg_data: indirect register data
749  *
750  */
751 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
752                                  u32 pcie_index, u32 pcie_data,
753                                  u32 reg_addr, u32 reg_data)
754 {
755         unsigned long flags;
756         void __iomem *pcie_index_offset;
757         void __iomem *pcie_data_offset;
758
759         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
760         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
761         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
762
763         writel(reg_addr, pcie_index_offset);
764         readl(pcie_index_offset);
765         writel(reg_data, pcie_data_offset);
766         readl(pcie_data_offset);
767         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
768 }
769
770 /**
771  * amdgpu_device_indirect_wreg64 - write a 64 bit value to an indirect register
772  *
773  * @adev: amdgpu_device pointer
774  * @pcie_index: mmio register offset of the index register
775  * @pcie_data: mmio register offset of the data register
776  * @reg_addr: indirect register offset
777  * @reg_data: indirect register data
778  *
779  */
780 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
781                                    u32 pcie_index, u32 pcie_data,
782                                    u32 reg_addr, u64 reg_data)
783 {
784         unsigned long flags;
785         void __iomem *pcie_index_offset;
786         void __iomem *pcie_data_offset;
787
788         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
789         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
790         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
791
792         /* write low 32 bits */
793         writel(reg_addr, pcie_index_offset);
794         readl(pcie_index_offset);
795         writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
796         readl(pcie_data_offset);
797         /* write high 32 bits */
798         writel(reg_addr + 4, pcie_index_offset);
799         readl(pcie_index_offset);
800         writel((u32)(reg_data >> 32), pcie_data_offset);
801         readl(pcie_data_offset);
802         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
803 }
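
/*
 * Wiring sketch (illustrative; callback names are assumptions loosely based
 * on the SOC15/NV code): an ASIC file typically plugs these helpers into
 * adev->pcie_rreg / adev->pcie_wreg using the NBIO-provided index/data
 * register offsets:
 *
 *	static u32 soc_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *	{
 *		u32 index = adev->nbio.funcs->get_pcie_index_offset(adev);
 *		u32 data = adev->nbio.funcs->get_pcie_data_offset(adev);
 *
 *		return amdgpu_device_indirect_rreg(adev, index, data, reg);
 *	}
 */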
804
805 /**
806  * amdgpu_invalid_rreg - dummy reg read function
807  *
808  * @adev: amdgpu_device pointer
809  * @reg: offset of register
810  *
811  * Dummy register read function.  Used for register blocks
812  * that certain asics don't have (all asics).
813  * Returns the value in the register.
814  */
815 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
816 {
817         DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
818         BUG();
819         return 0;
820 }
821
822 /**
823  * amdgpu_invalid_wreg - dummy reg write function
824  *
825  * @adev: amdgpu_device pointer
826  * @reg: offset of register
827  * @v: value to write to the register
828  *
829  * Dummy register write function.  Used for register blocks
830  * that certain asics don't have (all asics).
831  */
832 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
833 {
834         DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
835                   reg, v);
836         BUG();
837 }
838
839 /**
840  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
841  *
842  * @adev: amdgpu_device pointer
843  * @reg: offset of register
844  *
845  * Dummy register read function.  Used for register blocks
846  * that certain asics don't have (all asics).
847  * Returns the value in the register.
848  */
849 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
850 {
851         DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
852         BUG();
853         return 0;
854 }
855
856 /**
857  * amdgpu_invalid_wreg64 - dummy reg write function
858  *
859  * @adev: amdgpu_device pointer
860  * @reg: offset of register
861  * @v: value to write to the register
862  *
863  * Dummy register write function.  Used for register blocks
864  * that certain asics don't have (all asics).
865  */
866 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
867 {
868         DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
869                   reg, v);
870         BUG();
871 }
872
873 /**
874  * amdgpu_block_invalid_rreg - dummy reg read function
875  *
876  * @adev: amdgpu_device pointer
877  * @block: offset of instance
878  * @reg: offset of register
879  *
880  * Dummy register read function.  Used for register blocks
881  * that certain asics don't have (all asics).
882  * Returns the value in the register.
883  */
884 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
885                                           uint32_t block, uint32_t reg)
886 {
887         DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
888                   reg, block);
889         BUG();
890         return 0;
891 }
892
893 /**
894  * amdgpu_block_invalid_wreg - dummy reg write function
895  *
896  * @adev: amdgpu_device pointer
897  * @block: offset of instance
898  * @reg: offset of register
899  * @v: value to write to the register
900  *
901  * Dummy register write function.  Used for register blocks
902  * that certain asics don't have (all asics).
903  */
904 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
905                                       uint32_t block,
906                                       uint32_t reg, uint32_t v)
907 {
908         DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
909                   reg, block, v);
910         BUG();
911 }
912
913 /**
914  * amdgpu_device_asic_init - Wrapper for atom asic_init
915  *
916  * @adev: amdgpu_device pointer
917  *
918  * Does any asic specific work and then calls atom asic init.
919  */
920 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
921 {
922         amdgpu_asic_pre_asic_init(adev);
923
924         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
925                 return amdgpu_atomfirmware_asic_init(adev, true);
926         else
927                 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
928 }
929
930 /**
931  * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
932  *
933  * @adev: amdgpu_device pointer
934  *
935  * Allocates a scratch page of VRAM for use by various things in the
936  * driver.
937  */
938 static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
939 {
940         return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
941                                        AMDGPU_GEM_DOMAIN_VRAM |
942                                        AMDGPU_GEM_DOMAIN_GTT,
943                                        &adev->mem_scratch.robj,
944                                        &adev->mem_scratch.gpu_addr,
945                                        (void **)&adev->mem_scratch.ptr);
946 }
947
948 /**
949  * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
950  *
951  * @adev: amdgpu_device pointer
952  *
953  * Frees the VRAM scratch page.
954  */
955 static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
956 {
957         amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
958 }
959
960 /**
961  * amdgpu_device_program_register_sequence - program an array of registers.
962  *
963  * @adev: amdgpu_device pointer
964  * @registers: pointer to the register array
965  * @array_size: size of the register array
966  *
967  * Programs an array of registers with AND and OR masks.
968  * This is a helper for setting golden registers.
969  */
970 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
971                                              const u32 *registers,
972                                              const u32 array_size)
973 {
974         u32 tmp, reg, and_mask, or_mask;
975         int i;
976
977         if (array_size % 3)
978                 return;
979
980         for (i = 0; i < array_size; i += 3) {
981                 reg = registers[i + 0];
982                 and_mask = registers[i + 1];
983                 or_mask = registers[i + 2];
984
985                 if (and_mask == 0xffffffff) {
986                         tmp = or_mask;
987                 } else {
988                         tmp = RREG32(reg);
989                         tmp &= ~and_mask;
990                         if (adev->family >= AMDGPU_FAMILY_AI)
991                                 tmp |= (or_mask & and_mask);
992                         else
993                                 tmp |= or_mask;
994                 }
995                 WREG32(reg, tmp);
996         }
997 }
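
/*
 * Usage sketch (illustrative; register names are hypothetical): golden
 * register tables are flat arrays of {offset, and_mask, or_mask} triplets.
 * An and_mask of 0xffffffff means the or_mask is written as-is, anything
 * else is applied read-modify-write:
 *
 *	static const u32 example_golden_settings[] = {
 *		mmFOO_CNTL, 0xffffffff, 0x00000100,
 *		mmBAR_CNTL, 0x0000ff00, 0x00003200,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *						ARRAY_SIZE(example_golden_settings));
 */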
998
999 /**
1000  * amdgpu_device_pci_config_reset - reset the GPU
1001  *
1002  * @adev: amdgpu_device pointer
1003  *
1004  * Resets the GPU using the pci config reset sequence.
1005  * Only applicable to asics prior to vega10.
1006  */
1007 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1008 {
1009         pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1010 }
1011
1012 /**
1013  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1014  *
1015  * @adev: amdgpu_device pointer
1016  *
1017  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1018  */
1019 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1020 {
1021         return pci_reset_function(adev->pdev);
1022 }
1023
1024 /*
1025  * GPU doorbell aperture helpers function.
1026  */
1027 /**
1028  * amdgpu_device_doorbell_init - Init doorbell driver information.
1029  *
1030  * @adev: amdgpu_device pointer
1031  *
1032  * Init doorbell driver information (CIK)
1033  * Returns 0 on success, error on failure.
1034  */
1035 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1036 {
1037
1038         /* No doorbell on SI hardware generation */
1039         if (adev->asic_type < CHIP_BONAIRE) {
1040                 adev->doorbell.base = 0;
1041                 adev->doorbell.size = 0;
1042                 adev->doorbell.num_doorbells = 0;
1043                 adev->doorbell.ptr = NULL;
1044                 return 0;
1045         }
1046
1047         if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1048                 return -EINVAL;
1049
1050         amdgpu_asic_init_doorbell_index(adev);
1051
1052         /* doorbell bar mapping */
1053         adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1054         adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1055
1056         if (adev->enable_mes) {
1057                 adev->doorbell.num_doorbells =
1058                         adev->doorbell.size / sizeof(u32);
1059         } else {
1060                 adev->doorbell.num_doorbells =
1061                         min_t(u32, adev->doorbell.size / sizeof(u32),
1062                               adev->doorbell_index.max_assignment+1);
1063                 if (adev->doorbell.num_doorbells == 0)
1064                         return -EINVAL;
1065
1066                 /* For Vega, reserve and map two pages on doorbell BAR since SDMA
1067                  * paging queue doorbell uses the second page. The
1068                  * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1069                  * doorbells are in the first page. So with paging queue enabled,
1070                  * the max num_doorbells should be increased by one page (0x400 in dwords).
1071                  */
1072                 if (adev->asic_type >= CHIP_VEGA10)
1073                         adev->doorbell.num_doorbells += 0x400;
1074         }
1075
1076         adev->doorbell.ptr = ioremap(adev->doorbell.base,
1077                                      adev->doorbell.num_doorbells *
1078                                      sizeof(u32));
1079         if (adev->doorbell.ptr == NULL)
1080                 return -ENOMEM;
1081
1082         return 0;
1083 }
1084
1085 /**
1086  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1087  *
1088  * @adev: amdgpu_device pointer
1089  *
1090  * Tear down doorbell driver information (CIK)
1091  */
1092 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1093 {
1094         iounmap(adev->doorbell.ptr);
1095         adev->doorbell.ptr = NULL;
1096 }
1097
1098
1099
1100 /*
1101  * amdgpu_device_wb_*()
1102  * Writeback is the method by which the GPU updates special pages in memory
1103  * with the status of certain GPU events (fences, ring pointers, etc.).
1104  */
1105
1106 /**
1107  * amdgpu_device_wb_fini - Disable Writeback and free memory
1108  *
1109  * @adev: amdgpu_device pointer
1110  *
1111  * Disables Writeback and frees the Writeback memory (all asics).
1112  * Used at driver shutdown.
1113  */
1114 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1115 {
1116         if (adev->wb.wb_obj) {
1117                 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1118                                       &adev->wb.gpu_addr,
1119                                       (void **)&adev->wb.wb);
1120                 adev->wb.wb_obj = NULL;
1121         }
1122 }
1123
1124 /**
1125  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1126  *
1127  * @adev: amdgpu_device pointer
1128  *
1129  * Initializes writeback and allocates writeback memory (all asics).
1130  * Used at driver startup.
1131  * Returns 0 on success or a negative error code on failure.
1132  */
1133 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1134 {
1135         int r;
1136
1137         if (adev->wb.wb_obj == NULL) {
1138                 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1139                 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1140                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1141                                             &adev->wb.wb_obj, &adev->wb.gpu_addr,
1142                                             (void **)&adev->wb.wb);
1143                 if (r) {
1144                         dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1145                         return r;
1146                 }
1147
1148                 adev->wb.num_wb = AMDGPU_MAX_WB;
1149                 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1150
1151                 /* clear wb memory */
1152                 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1153         }
1154
1155         return 0;
1156 }
1157
1158 /**
1159  * amdgpu_device_wb_get - Allocate a wb entry
1160  *
1161  * @adev: amdgpu_device pointer
1162  * @wb: wb index
1163  *
1164  * Allocate a wb slot for use by the driver (all asics).
1165  * Returns 0 on success or -EINVAL on failure.
1166  */
1167 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1168 {
1169         unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1170
1171         if (offset < adev->wb.num_wb) {
1172                 __set_bit(offset, adev->wb.used);
1173                 *wb = offset << 3; /* convert to dw offset */
1174                 return 0;
1175         } else {
1176                 return -EINVAL;
1177         }
1178 }
1179
1180 /**
1181  * amdgpu_device_wb_free - Free a wb entry
1182  *
1183  * @adev: amdgpu_device pointer
1184  * @wb: wb index
1185  *
1186  * Free a wb slot allocated for use by the driver (all asics)
1187  */
1188 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1189 {
1190         wb >>= 3;
1191         if (wb < adev->wb.num_wb)
1192                 __clear_bit(wb, adev->wb.used);
1193 }
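
/*
 * Usage sketch (illustrative, not part of the driver): an IP block that needs
 * a writeback slot typically allocates one, derives the GPU address and CPU
 * pointer from the returned dword index, and frees it again on teardown:
 *
 *	u32 wb;
 *	int r;
 *
 *	r = amdgpu_device_wb_get(adev, &wb);
 *	if (r)
 *		return r;
 *
 *	wb_gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *	wb_cpu_ptr  = &adev->wb.wb[wb];
 *	...
 *	amdgpu_device_wb_free(adev, wb);
 */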
1194
1195 /**
1196  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1197  *
1198  * @adev: amdgpu_device pointer
1199  *
1200  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1201  * to fail, but if any of the BARs is not accessible after the resize we abort
1202  * driver loading by returning -ENODEV.
1203  */
1204 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1205 {
1206         int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1207         struct pci_bus *root;
1208         struct resource *res;
1209         unsigned i;
1210         u16 cmd;
1211         int r;
1212
1213         /* Bypass for VF */
1214         if (amdgpu_sriov_vf(adev))
1215                 return 0;
1216
1217         /* skip if the bios has already enabled large BAR */
1218         if (adev->gmc.real_vram_size &&
1219             (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1220                 return 0;
1221
1222         /* Check if the root BUS has 64bit memory resources */
1223         root = adev->pdev->bus;
1224         while (root->parent)
1225                 root = root->parent;
1226
1227         pci_bus_for_each_resource(root, res, i) {
1228                 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1229                     res->start > 0x100000000ull)
1230                         break;
1231         }
1232
1233         /* Trying to resize is pointless without a root hub window above 4GB */
1234         if (!res)
1235                 return 0;
1236
1237         /* Limit the BAR size to what is available */
1238         rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1239                         rbar_size);
1240
1241         /* Disable memory decoding while we change the BAR addresses and size */
1242         pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1243         pci_write_config_word(adev->pdev, PCI_COMMAND,
1244                               cmd & ~PCI_COMMAND_MEMORY);
1245
1246         /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1247         amdgpu_device_doorbell_fini(adev);
1248         if (adev->asic_type >= CHIP_BONAIRE)
1249                 pci_release_resource(adev->pdev, 2);
1250
1251         pci_release_resource(adev->pdev, 0);
1252
1253         r = pci_resize_resource(adev->pdev, 0, rbar_size);
1254         if (r == -ENOSPC)
1255                 DRM_INFO("Not enough PCI address space for a large BAR.");
1256         else if (r && r != -ENOTSUPP)
1257                 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1258
1259         pci_assign_unassigned_bus_resources(adev->pdev->bus);
1260
1261         /* When the doorbell or fb BAR isn't available we have no chance of
1262          * using the device.
1263          */
1264         r = amdgpu_device_doorbell_init(adev);
1265         if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1266                 return -ENODEV;
1267
1268         pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1269
1270         return 0;
1271 }
1272
1273 /*
1274  * GPU helpers function.
1275  */
1276 /**
1277  * amdgpu_device_need_post - check if the hw need post or not
1278  *
1279  * @adev: amdgpu_device pointer
1280  *
1281  * Check if the asic needs to be posted (all asics), either at driver startup
1282  * or because a hw reset was performed.
1283  * Returns true if post is needed, false if not.
1284  */
1285 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1286 {
1287         uint32_t reg;
1288
1289         if (amdgpu_sriov_vf(adev))
1290                 return false;
1291
1292         if (amdgpu_passthrough(adev)) {
1293                 /* for FIJI: In the whole-GPU pass-through virtualization case, after a VM
1294                  * reboot some old SMC firmware still needs the driver to do a vPost or the
1295                  * GPU hangs. SMC firmware versions above 22.15 don't have this flaw, so
1296                  * force vPost for SMC versions below 22.15.
1297                  */
1298                 if (adev->asic_type == CHIP_FIJI) {
1299                         int err;
1300                         uint32_t fw_ver;
1301                         err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1302                         /* force vPost if an error occurred */
1303                         if (err)
1304                                 return true;
1305
1306                         fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1307                         if (fw_ver < 0x00160e00)
1308                                 return true;
1309                 }
1310         }
1311
1312         /* Don't post if we need to reset whole hive on init */
1313         if (adev->gmc.xgmi.pending_reset)
1314                 return false;
1315
1316         if (adev->has_hw_reset) {
1317                 adev->has_hw_reset = false;
1318                 return true;
1319         }
1320
1321         /* bios scratch used on CIK+ */
1322         if (adev->asic_type >= CHIP_BONAIRE)
1323                 return amdgpu_atombios_scratch_need_asic_init(adev);
1324
1325         /* check MEM_SIZE for older asics */
1326         reg = amdgpu_asic_get_config_memsize(adev);
1327
1328         if ((reg != 0) && (reg != 0xffffffff))
1329                 return false;
1330
1331         return true;
1332 }
1333
1334 /**
1335  * amdgpu_device_should_use_aspm - check if the device should program ASPM
1336  *
1337  * @adev: amdgpu_device pointer
1338  *
1339  * Confirm whether the module parameter and pcie bridge agree that ASPM should
1340  * be set for this device.
1341  *
1342  * Returns true if it should be used or false if not.
1343  */
1344 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1345 {
1346         switch (amdgpu_aspm) {
1347         case -1:
1348                 break;
1349         case 0:
1350                 return false;
1351         case 1:
1352                 return true;
1353         default:
1354                 return false;
1355         }
1356         return pcie_aspm_enabled(adev->pdev);
1357 }
1358
1359 /* if we get transitioned to only one device, take VGA back */
1360 /**
1361  * amdgpu_device_vga_set_decode - enable/disable vga decode
1362  *
1363  * @pdev: PCI device pointer
1364  * @state: enable/disable vga decode
1365  *
1366  * Enable/disable vga decode (all asics).
1367  * Returns VGA resource flags.
1368  */
1369 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1370                 bool state)
1371 {
1372         struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1373         amdgpu_asic_set_vga_state(adev, state);
1374         if (state)
1375                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1376                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1377         else
1378                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1379 }
1380
1381 /**
1382  * amdgpu_device_check_block_size - validate the vm block size
1383  *
1384  * @adev: amdgpu_device pointer
1385  *
1386  * Validates the vm block size specified via module parameter.
1387  * The vm block size defines number of bits in page table versus page directory,
1388  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1389  * page table and the remaining bits are in the page directory.
1390  */
1391 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1392 {
1393         /* defines number of bits in page table versus page directory,
1394          * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1395          * page table and the remaining bits are in the page directory */
1396         if (amdgpu_vm_block_size == -1)
1397                 return;
1398
1399         if (amdgpu_vm_block_size < 9) {
1400                 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1401                          amdgpu_vm_block_size);
1402                 amdgpu_vm_block_size = -1;
1403         }
1404 }
1405
1406 /**
1407  * amdgpu_device_check_vm_size - validate the vm size
1408  *
1409  * @adev: amdgpu_device pointer
1410  *
1411  * Validates the vm size in GB specified via module parameter.
1412  * The VM size is the size of the GPU virtual memory space in GB.
1413  */
1414 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1415 {
1416         /* no need to check the default value */
1417         if (amdgpu_vm_size == -1)
1418                 return;
1419
1420         if (amdgpu_vm_size < 1) {
1421                 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1422                          amdgpu_vm_size);
1423                 amdgpu_vm_size = -1;
1424         }
1425 }
1426
1427 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1428 {
1429         struct sysinfo si;
1430         bool is_os_64 = (sizeof(void *) == 8);
1431         uint64_t total_memory;
1432         uint64_t dram_size_seven_GB = 0x1B8000000;
1433         uint64_t dram_size_three_GB = 0xB8000000;
1434
1435         if (amdgpu_smu_memory_pool_size == 0)
1436                 return;
1437
1438         if (!is_os_64) {
1439                 DRM_WARN("Not 64-bit OS, feature not supported\n");
1440                 goto def_value;
1441         }
1442         si_meminfo(&si);
1443         total_memory = (uint64_t)si.totalram * si.mem_unit;
1444
1445         if ((amdgpu_smu_memory_pool_size == 1) ||
1446                 (amdgpu_smu_memory_pool_size == 2)) {
1447                 if (total_memory < dram_size_three_GB)
1448                         goto def_value1;
1449         } else if ((amdgpu_smu_memory_pool_size == 4) ||
1450                 (amdgpu_smu_memory_pool_size == 8)) {
1451                 if (total_memory < dram_size_seven_GB)
1452                         goto def_value1;
1453         } else {
1454                 DRM_WARN("Smu memory pool size not supported\n");
1455                 goto def_value;
1456         }
1457         adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1458
1459         return;
1460
1461 def_value1:
1462         DRM_WARN("Not enough system memory\n");
1463 def_value:
1464         adev->pm.smu_prv_buffer_size = 0;
1465 }
1466
1467 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1468 {
1469         if (!(adev->flags & AMD_IS_APU) ||
1470             adev->asic_type < CHIP_RAVEN)
1471                 return 0;
1472
1473         switch (adev->asic_type) {
1474         case CHIP_RAVEN:
1475                 if (adev->pdev->device == 0x15dd)
1476                         adev->apu_flags |= AMD_APU_IS_RAVEN;
1477                 if (adev->pdev->device == 0x15d8)
1478                         adev->apu_flags |= AMD_APU_IS_PICASSO;
1479                 break;
1480         case CHIP_RENOIR:
1481                 if ((adev->pdev->device == 0x1636) ||
1482                     (adev->pdev->device == 0x164c))
1483                         adev->apu_flags |= AMD_APU_IS_RENOIR;
1484                 else
1485                         adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1486                 break;
1487         case CHIP_VANGOGH:
1488                 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1489                 break;
1490         case CHIP_YELLOW_CARP:
1491                 break;
1492         case CHIP_CYAN_SKILLFISH:
1493                 if ((adev->pdev->device == 0x13FE) ||
1494                     (adev->pdev->device == 0x143F))
1495                         adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1496                 break;
1497         default:
1498                 break;
1499         }
1500
1501         return 0;
1502 }
1503
1504 /**
1505  * amdgpu_device_check_arguments - validate module params
1506  *
1507  * @adev: amdgpu_device pointer
1508  *
1509  * Validates certain module parameters and updates
1510  * the associated values used by the driver (all asics).
1511  */
1512 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1513 {
1514         if (amdgpu_sched_jobs < 4) {
1515                 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1516                          amdgpu_sched_jobs);
1517                 amdgpu_sched_jobs = 4;
1518         } else if (!is_power_of_2(amdgpu_sched_jobs)) {
1519                 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1520                          amdgpu_sched_jobs);
1521                 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1522         }
1523
1524         if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1525                 /* gart size must be greater than or equal to 32M */
1526                 dev_warn(adev->dev, "gart size (%d) too small\n",
1527                          amdgpu_gart_size);
1528                 amdgpu_gart_size = -1;
1529         }
1530
1531         if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1532                 /* gtt size must be greater than or equal to 32M */
1533                 dev_warn(adev->dev, "gtt size (%d) too small\n",
1534                                  amdgpu_gtt_size);
1535                 amdgpu_gtt_size = -1;
1536         }
1537
1538         /* valid range is between 4 and 9 inclusive */
1539         if (amdgpu_vm_fragment_size != -1 &&
1540             (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1541                 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1542                 amdgpu_vm_fragment_size = -1;
1543         }
1544
1545         if (amdgpu_sched_hw_submission < 2) {
1546                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1547                          amdgpu_sched_hw_submission);
1548                 amdgpu_sched_hw_submission = 2;
1549         } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1550                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1551                          amdgpu_sched_hw_submission);
1552                 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1553         }
1554
1555         if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1556                 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1557                 amdgpu_reset_method = -1;
1558         }
1559
1560         amdgpu_device_check_smu_prv_buffer_size(adev);
1561
1562         amdgpu_device_check_vm_size(adev);
1563
1564         amdgpu_device_check_block_size(adev);
1565
1566         adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1567
1568         return 0;
1569 }
1570
1571 /**
1572  * amdgpu_switcheroo_set_state - set switcheroo state
1573  *
1574  * @pdev: pci dev pointer
1575  * @state: vga_switcheroo state
1576  *
1577  * Callback for the switcheroo driver.  Suspends or resumes
1578  * the asics before or after it is powered up using ACPI methods.
1579  */
1580 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1581                                         enum vga_switcheroo_state state)
1582 {
1583         struct drm_device *dev = pci_get_drvdata(pdev);
1584         int r;
1585
1586         if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1587                 return;
1588
1589         if (state == VGA_SWITCHEROO_ON) {
1590                 pr_info("switched on\n");
1591                 /* don't suspend or resume card normally */
1592                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1593
1594                 pci_set_power_state(pdev, PCI_D0);
1595                 amdgpu_device_load_pci_state(pdev);
1596                 r = pci_enable_device(pdev);
1597                 if (r)
1598                         DRM_WARN("pci_enable_device failed (%d)\n", r);
1599                 amdgpu_device_resume(dev, true);
1600
1601                 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1602         } else {
1603                 pr_info("switched off\n");
1604                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1605                 amdgpu_device_suspend(dev, true);
1606                 amdgpu_device_cache_pci_state(pdev);
1607                 /* Shut down the device */
1608                 pci_disable_device(pdev);
1609                 pci_set_power_state(pdev, PCI_D3cold);
1610                 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1611         }
1612 }
1613
1614 /**
1615  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1616  *
1617  * @pdev: pci dev pointer
1618  *
1619  * Callback for the switcheroo driver.  Checks if the switcheroo
1620  * state can be changed.
1621  * Returns true if the state can be changed, false if not.
1622  */
1623 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1624 {
1625         struct drm_device *dev = pci_get_drvdata(pdev);
1626
1627         /*
1628          * FIXME: open_count is protected by drm_global_mutex but that would lead to
1629          * locking inversion with the driver load path. And the access here is
1630          * completely racy anyway. So don't bother with locking for now.
1631          */
1632         return atomic_read(&dev->open_count) == 0;
1633 }
1634
1635 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1636         .set_gpu_state = amdgpu_switcheroo_set_state,
1637         .reprobe = NULL,
1638         .can_switch = amdgpu_switcheroo_can_switch,
1639 };
1640
1641 /**
1642  * amdgpu_device_ip_set_clockgating_state - set the CG state
1643  *
1644  * @dev: amdgpu_device pointer
1645  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1646  * @state: clockgating state (gate or ungate)
1647  *
1648  * Sets the requested clockgating state for all instances of
1649  * the hardware IP specified.
1650  * Returns the error code from the last instance.
1651  */
1652 int amdgpu_device_ip_set_clockgating_state(void *dev,
1653                                            enum amd_ip_block_type block_type,
1654                                            enum amd_clockgating_state state)
1655 {
1656         struct amdgpu_device *adev = dev;
1657         int i, r = 0;
1658
1659         for (i = 0; i < adev->num_ip_blocks; i++) {
1660                 if (!adev->ip_blocks[i].status.valid)
1661                         continue;
1662                 if (adev->ip_blocks[i].version->type != block_type)
1663                         continue;
1664                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1665                         continue;
1666                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1667                         (void *)adev, state);
1668                 if (r)
1669                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1670                                   adev->ip_blocks[i].version->funcs->name, r);
1671         }
1672         return r;
1673 }
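
/*
 * Minimal usage sketch, assuming a caller holding a valid adev; the enum
 * values below are the ones used elsewhere in this file and the error
 * handling is illustrative only, not a definitive implementation:
 *
 *	r = amdgpu_device_ip_set_clockgating_state(adev,
 *						    AMD_IP_BLOCK_TYPE_GFX,
 *						    AMD_CG_STATE_GATE);
 *	if (r)
 *		dev_err(adev->dev, "gating GFX clocks failed (%d)\n", r);
 */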
1674
1675 /**
1676  * amdgpu_device_ip_set_powergating_state - set the PG state
1677  *
1678  * @dev: amdgpu_device pointer
1679  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1680  * @state: powergating state (gate or ungate)
1681  *
1682  * Sets the requested powergating state for all instances of
1683  * the hardware IP specified.
1684  * Returns the error code from the last instance.
1685  */
1686 int amdgpu_device_ip_set_powergating_state(void *dev,
1687                                            enum amd_ip_block_type block_type,
1688                                            enum amd_powergating_state state)
1689 {
1690         struct amdgpu_device *adev = dev;
1691         int i, r = 0;
1692
1693         for (i = 0; i < adev->num_ip_blocks; i++) {
1694                 if (!adev->ip_blocks[i].status.valid)
1695                         continue;
1696                 if (adev->ip_blocks[i].version->type != block_type)
1697                         continue;
1698                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1699                         continue;
1700                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1701                         (void *)adev, state);
1702                 if (r)
1703                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1704                                   adev->ip_blocks[i].version->funcs->name, r);
1705         }
1706         return r;
1707 }
1708
1709 /**
1710  * amdgpu_device_ip_get_clockgating_state - get the CG state
1711  *
1712  * @adev: amdgpu_device pointer
1713  * @flags: clockgating feature flags
1714  *
1715  * Walks the list of IPs on the device and updates the clockgating
1716  * flags for each IP.
1717  * Updates @flags with the feature flags for each hardware IP where
1718  * clockgating is enabled.
1719  */
1720 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1721                                             u64 *flags)
1722 {
1723         int i;
1724
1725         for (i = 0; i < adev->num_ip_blocks; i++) {
1726                 if (!adev->ip_blocks[i].status.valid)
1727                         continue;
1728                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1729                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1730         }
1731 }
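
/*
 * Minimal usage sketch (illustrative, not taken from the driver): collect
 * the current clockgating feature flags, e.g. for a debugfs style dump:
 *
 *	u64 flags = 0;
 *
 *	amdgpu_device_ip_get_clockgating_state(adev, &flags);
 */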
1732
1733 /**
1734  * amdgpu_device_ip_wait_for_idle - wait for idle
1735  *
1736  * @adev: amdgpu_device pointer
1737  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1738  *
1739  * Waits for the requested hardware IP to be idle.
1740  * Returns 0 for success or a negative error code on failure.
1741  */
1742 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1743                                    enum amd_ip_block_type block_type)
1744 {
1745         int i, r;
1746
1747         for (i = 0; i < adev->num_ip_blocks; i++) {
1748                 if (!adev->ip_blocks[i].status.valid)
1749                         continue;
1750                 if (adev->ip_blocks[i].version->type == block_type) {
1751                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1752                         if (r)
1753                                 return r;
1754                         break;
1755                 }
1756         }
1757         return 0;
1758
1759 }
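
/*
 * Minimal usage sketch, assuming a caller that must quiesce GFX before
 * reprogramming it (illustrative only):
 *
 *	r = amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GFX);
 *	if (r)
 *		return r;
 */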
1760
1761 /**
1762  * amdgpu_device_ip_is_idle - is the hardware IP idle
1763  *
1764  * @adev: amdgpu_device pointer
1765  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1766  *
1767  * Check if the hardware IP is idle or not.
1768  * Returns true if the IP is idle, false if not.
1769  */
1770 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1771                               enum amd_ip_block_type block_type)
1772 {
1773         int i;
1774
1775         for (i = 0; i < adev->num_ip_blocks; i++) {
1776                 if (!adev->ip_blocks[i].status.valid)
1777                         continue;
1778                 if (adev->ip_blocks[i].version->type == block_type)
1779                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1780         }
1781         return true;
1782
1783 }
1784
1785 /**
1786  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1787  *
1788  * @adev: amdgpu_device pointer
1789  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1790  *
1791  * Returns a pointer to the hardware IP block structure
1792  * if it exists for the asic, otherwise NULL.
1793  */
1794 struct amdgpu_ip_block *
1795 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1796                               enum amd_ip_block_type type)
1797 {
1798         int i;
1799
1800         for (i = 0; i < adev->num_ip_blocks; i++)
1801                 if (adev->ip_blocks[i].version->type == type)
1802                         return &adev->ip_blocks[i];
1803
1804         return NULL;
1805 }
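
/*
 * Minimal usage sketch (illustrative, not from the driver): look up the GMC
 * IP block and branch on its major version; the helper name is a
 * hypothetical placeholder:
 *
 *	struct amdgpu_ip_block *gmc =
 *		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
 *	if (gmc && gmc->version->major >= 9)
 *		use_gmc_v9_path(adev);   (hypothetical helper)
 */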
1806
1807 /**
1808  * amdgpu_device_ip_block_version_cmp
1809  *
1810  * @adev: amdgpu_device pointer
1811  * @type: enum amd_ip_block_type
1812  * @major: major version
1813  * @minor: minor version
1814  *
1815  * return 0 if equal or greater
1816  * return 1 if smaller or the ip_block doesn't exist
1817  */
1818 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1819                                        enum amd_ip_block_type type,
1820                                        u32 major, u32 minor)
1821 {
1822         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1823
1824         if (ip_block && ((ip_block->version->major > major) ||
1825                         ((ip_block->version->major == major) &&
1826                         (ip_block->version->minor >= minor))))
1827                 return 0;
1828
1829         return 1;
1830 }
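
/*
 * Minimal usage sketch (illustrative): gate a feature on a minimum IP
 * version, relying on the "0 means equal or greater" convention above:
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *						9, 0))
 *		enable_feature(adev);   (hypothetical helper)
 */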
1831
1832 /**
1833  * amdgpu_device_ip_block_add
1834  *
1835  * @adev: amdgpu_device pointer
1836  * @ip_block_version: pointer to the IP to add
1837  *
1838  * Adds the IP block driver information to the collection of IPs
1839  * on the asic.
1840  */
1841 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1842                                const struct amdgpu_ip_block_version *ip_block_version)
1843 {
1844         if (!ip_block_version)
1845                 return -EINVAL;
1846
1847         switch (ip_block_version->type) {
1848         case AMD_IP_BLOCK_TYPE_VCN:
1849                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1850                         return 0;
1851                 break;
1852         case AMD_IP_BLOCK_TYPE_JPEG:
1853                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1854                         return 0;
1855                 break;
1856         default:
1857                 break;
1858         }
1859
1860         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1861                   ip_block_version->funcs->name);
1862
1863         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1864
1865         return 0;
1866 }
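
/*
 * Minimal usage sketch, assuming an ASIC setup routine similar to the
 * *_set_ip_blocks() functions used later in this file; the block name is a
 * placeholder, not a real symbol:
 *
 *	r = amdgpu_device_ip_block_add(adev, &example_common_ip_block);
 *	if (r)
 *		return r;
 */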
1867
1868 /**
1869  * amdgpu_device_enable_virtual_display - enable virtual display feature
1870  *
1871  * @adev: amdgpu_device pointer
1872  *
1873  * Enables the virtual display feature if the user has enabled it via
1874  * the module parameter virtual_display.  This feature provides a virtual
1875  * display hardware on headless boards or in virtualized environments.
1876  * This function parses and validates the configuration string specified by
1877  * the user and configures the virtual display settings (number of
1878  * virtual connectors, crtcs, etc.) specified.
1879  */
1880 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1881 {
1882         adev->enable_virtual_display = false;
1883
1884         if (amdgpu_virtual_display) {
1885                 const char *pci_address_name = pci_name(adev->pdev);
1886                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1887
1888                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1889                 pciaddstr_tmp = pciaddstr;
1890                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1891                         pciaddname = strsep(&pciaddname_tmp, ",");
1892                         if (!strcmp("all", pciaddname)
1893                             || !strcmp(pci_address_name, pciaddname)) {
1894                                 long num_crtc;
1895                                 int res = -1;
1896
1897                                 adev->enable_virtual_display = true;
1898
1899                                 if (pciaddname_tmp)
1900                                         res = kstrtol(pciaddname_tmp, 10,
1901                                                       &num_crtc);
1902
1903                                 if (!res) {
1904                                         if (num_crtc < 1)
1905                                                 num_crtc = 1;
1906                                         if (num_crtc > 6)
1907                                                 num_crtc = 6;
1908                                         adev->mode_info.num_crtc = num_crtc;
1909                                 } else {
1910                                         adev->mode_info.num_crtc = 1;
1911                                 }
1912                                 break;
1913                         }
1914                 }
1915
1916                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1917                          amdgpu_virtual_display, pci_address_name,
1918                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1919
1920                 kfree(pciaddstr);
1921         }
1922 }
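
/*
 * Example of the virtual_display string parsed above (the addresses and CRTC
 * counts are made-up values):
 *
 *	amdgpu.virtual_display=0000:03:00.0,2;0000:04:00.0,1
 *
 * on the kernel command line enables two virtual crtcs on 0000:03:00.0 and
 * one on 0000:04:00.0; "all" can be used instead of a PCI address to match
 * every device.
 */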
1923
1924 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
1925 {
1926         if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
1927                 adev->mode_info.num_crtc = 1;
1928                 adev->enable_virtual_display = true;
1929                 DRM_INFO("virtual_display:%d, num_crtc:%d\n",
1930                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1931         }
1932 }
1933
1934 /**
1935  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1936  *
1937  * @adev: amdgpu_device pointer
1938  *
1939  * Parses the asic configuration parameters specified in the gpu info
1940  * firmware and makes them available to the driver for use in configuring
1941  * the asic.
1942  * Returns 0 on success, -EINVAL on failure.
1943  */
1944 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1945 {
1946         const char *chip_name;
1947         char fw_name[40];
1948         int err;
1949         const struct gpu_info_firmware_header_v1_0 *hdr;
1950
1951         adev->firmware.gpu_info_fw = NULL;
1952
1953         if (adev->mman.discovery_bin) {
1954                 /*
1955                  * FIXME: The bounding box is still needed by Navi12, so
1956                  * temporarily read it from gpu_info firmware. Should be dropped
1957                  * when DAL no longer needs it.
1958                  */
1959                 if (adev->asic_type != CHIP_NAVI12)
1960                         return 0;
1961         }
1962
1963         switch (adev->asic_type) {
1964         default:
1965                 return 0;
1966         case CHIP_VEGA10:
1967                 chip_name = "vega10";
1968                 break;
1969         case CHIP_VEGA12:
1970                 chip_name = "vega12";
1971                 break;
1972         case CHIP_RAVEN:
1973                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1974                         chip_name = "raven2";
1975                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1976                         chip_name = "picasso";
1977                 else
1978                         chip_name = "raven";
1979                 break;
1980         case CHIP_ARCTURUS:
1981                 chip_name = "arcturus";
1982                 break;
1983         case CHIP_NAVI12:
1984                 chip_name = "navi12";
1985                 break;
1986         }
1987
1988         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1989         err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
1990         if (err) {
1991                 dev_err(adev->dev,
1992                         "Failed to get gpu_info firmware \"%s\"\n",
1993                         fw_name);
1994                 goto out;
1995         }
1996
1997         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1998         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1999
2000         switch (hdr->version_major) {
2001         case 1:
2002         {
2003                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2004                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2005                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2006
2007                 /*
2008                  * Should be dropped when DAL no longer needs it.
2009                  */
2010                 if (adev->asic_type == CHIP_NAVI12)
2011                         goto parse_soc_bounding_box;
2012
2013                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2014                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2015                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2016                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2017                 adev->gfx.config.max_texture_channel_caches =
2018                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
2019                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2020                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2021                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2022                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2023                 adev->gfx.config.double_offchip_lds_buf =
2024                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2025                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2026                 adev->gfx.cu_info.max_waves_per_simd =
2027                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2028                 adev->gfx.cu_info.max_scratch_slots_per_cu =
2029                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2030                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2031                 if (hdr->version_minor >= 1) {
2032                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2033                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2034                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2035                         adev->gfx.config.num_sc_per_sh =
2036                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2037                         adev->gfx.config.num_packer_per_sc =
2038                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2039                 }
2040
2041 parse_soc_bounding_box:
2042                 /*
2043                  * soc bounding box info is not integrated into the discovery table,
2044                  * so we always need to parse it from the gpu info firmware if needed.
2045                  */
2046                 if (hdr->version_minor == 2) {
2047                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2048                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2049                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2050                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2051                 }
2052                 break;
2053         }
2054         default:
2055                 dev_err(adev->dev,
2056                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2057                 err = -EINVAL;
2058                 goto out;
2059         }
2060 out:
2061         return err;
2062 }
2063
2064 /**
2065  * amdgpu_device_ip_early_init - run early init for hardware IPs
2066  *
2067  * @adev: amdgpu_device pointer
2068  *
2069  * Early initialization pass for hardware IPs.  The hardware IPs that make
2070  * up each asic are discovered each IP's early_init callback is run.  This
2071  * up each asic are discovered and each IP's early_init callback is run.  This
2072  * Returns 0 on success, negative error code on failure.
2073  */
2074 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2075 {
2076         struct drm_device *dev = adev_to_drm(adev);
2077         struct pci_dev *parent;
2078         int i, r;
2079
2080         amdgpu_device_enable_virtual_display(adev);
2081
2082         if (amdgpu_sriov_vf(adev)) {
2083                 r = amdgpu_virt_request_full_gpu(adev, true);
2084                 if (r)
2085                         return r;
2086         }
2087
2088         switch (adev->asic_type) {
2089 #ifdef CONFIG_DRM_AMDGPU_SI
2090         case CHIP_VERDE:
2091         case CHIP_TAHITI:
2092         case CHIP_PITCAIRN:
2093         case CHIP_OLAND:
2094         case CHIP_HAINAN:
2095                 adev->family = AMDGPU_FAMILY_SI;
2096                 r = si_set_ip_blocks(adev);
2097                 if (r)
2098                         return r;
2099                 break;
2100 #endif
2101 #ifdef CONFIG_DRM_AMDGPU_CIK
2102         case CHIP_BONAIRE:
2103         case CHIP_HAWAII:
2104         case CHIP_KAVERI:
2105         case CHIP_KABINI:
2106         case CHIP_MULLINS:
2107                 if (adev->flags & AMD_IS_APU)
2108                         adev->family = AMDGPU_FAMILY_KV;
2109                 else
2110                         adev->family = AMDGPU_FAMILY_CI;
2111
2112                 r = cik_set_ip_blocks(adev);
2113                 if (r)
2114                         return r;
2115                 break;
2116 #endif
2117         case CHIP_TOPAZ:
2118         case CHIP_TONGA:
2119         case CHIP_FIJI:
2120         case CHIP_POLARIS10:
2121         case CHIP_POLARIS11:
2122         case CHIP_POLARIS12:
2123         case CHIP_VEGAM:
2124         case CHIP_CARRIZO:
2125         case CHIP_STONEY:
2126                 if (adev->flags & AMD_IS_APU)
2127                         adev->family = AMDGPU_FAMILY_CZ;
2128                 else
2129                         adev->family = AMDGPU_FAMILY_VI;
2130
2131                 r = vi_set_ip_blocks(adev);
2132                 if (r)
2133                         return r;
2134                 break;
2135         default:
2136                 r = amdgpu_discovery_set_ip_blocks(adev);
2137                 if (r)
2138                         return r;
2139                 break;
2140         }
2141
2142         if (amdgpu_has_atpx() &&
2143             (amdgpu_is_atpx_hybrid() ||
2144              amdgpu_has_atpx_dgpu_power_cntl()) &&
2145             ((adev->flags & AMD_IS_APU) == 0) &&
2146             !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2147                 adev->flags |= AMD_IS_PX;
2148
2149         if (!(adev->flags & AMD_IS_APU)) {
2150                 parent = pci_upstream_bridge(adev->pdev);
2151                 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2152         }
2153
2154         amdgpu_amdkfd_device_probe(adev);
2155
2156         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2157         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2158                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2159         if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2160                 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2161
2162         for (i = 0; i < adev->num_ip_blocks; i++) {
2163                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2164                         DRM_ERROR("disabled ip block: %d <%s>\n",
2165                                   i, adev->ip_blocks[i].version->funcs->name);
2166                         adev->ip_blocks[i].status.valid = false;
2167                 } else {
2168                         if (adev->ip_blocks[i].version->funcs->early_init) {
2169                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2170                                 if (r == -ENOENT) {
2171                                         adev->ip_blocks[i].status.valid = false;
2172                                 } else if (r) {
2173                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2174                                                   adev->ip_blocks[i].version->funcs->name, r);
2175                                         return r;
2176                                 } else {
2177                                         adev->ip_blocks[i].status.valid = true;
2178                                 }
2179                         } else {
2180                                 adev->ip_blocks[i].status.valid = true;
2181                         }
2182                 }
2183                 /* get the vbios after the asic_funcs are set up */
2184                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2185                         r = amdgpu_device_parse_gpu_info_fw(adev);
2186                         if (r)
2187                                 return r;
2188
2189                         /* Read BIOS */
2190                         if (!amdgpu_get_bios(adev))
2191                                 return -EINVAL;
2192
2193                         r = amdgpu_atombios_init(adev);
2194                         if (r) {
2195                                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2196                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2197                                 return r;
2198                         }
2199
2200                         /* get pf2vf msg info at its earliest time */
2201                         if (amdgpu_sriov_vf(adev))
2202                                 amdgpu_virt_init_data_exchange(adev);
2203
2204                 }
2205         }
2206
2207         adev->cg_flags &= amdgpu_cg_mask;
2208         adev->pg_flags &= amdgpu_pg_mask;
2209
2210         return 0;
2211 }
2212
2213 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2214 {
2215         int i, r;
2216
2217         for (i = 0; i < adev->num_ip_blocks; i++) {
2218                 if (!adev->ip_blocks[i].status.sw)
2219                         continue;
2220                 if (adev->ip_blocks[i].status.hw)
2221                         continue;
2222                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2223                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2224                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2225                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2226                         if (r) {
2227                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2228                                           adev->ip_blocks[i].version->funcs->name, r);
2229                                 return r;
2230                         }
2231                         adev->ip_blocks[i].status.hw = true;
2232                 }
2233         }
2234
2235         return 0;
2236 }
2237
2238 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2239 {
2240         int i, r;
2241
2242         for (i = 0; i < adev->num_ip_blocks; i++) {
2243                 if (!adev->ip_blocks[i].status.sw)
2244                         continue;
2245                 if (adev->ip_blocks[i].status.hw)
2246                         continue;
2247                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2248                 if (r) {
2249                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2250                                   adev->ip_blocks[i].version->funcs->name, r);
2251                         return r;
2252                 }
2253                 adev->ip_blocks[i].status.hw = true;
2254         }
2255
2256         return 0;
2257 }
2258
2259 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2260 {
2261         int r = 0;
2262         int i;
2263         uint32_t smu_version;
2264
2265         if (adev->asic_type >= CHIP_VEGA10) {
2266                 for (i = 0; i < adev->num_ip_blocks; i++) {
2267                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2268                                 continue;
2269
2270                         if (!adev->ip_blocks[i].status.sw)
2271                                 continue;
2272
2273                         /* no need to do the fw loading again if already done */
2274                         if (adev->ip_blocks[i].status.hw == true)
2275                                 break;
2276
2277                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2278                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2279                                 if (r) {
2280                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2281                                                           adev->ip_blocks[i].version->funcs->name, r);
2282                                         return r;
2283                                 }
2284                         } else {
2285                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2286                                 if (r) {
2287                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2288                                                           adev->ip_blocks[i].version->funcs->name, r);
2289                                         return r;
2290                                 }
2291                         }
2292
2293                         adev->ip_blocks[i].status.hw = true;
2294                         break;
2295                 }
2296         }
2297
2298         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2299                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2300
2301         return r;
2302 }
2303
2304 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2305 {
2306         long timeout;
2307         int r, i;
2308
2309         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2310                 struct amdgpu_ring *ring = adev->rings[i];
2311
2312                 /* No need to set up the GPU scheduler for rings that don't need it */
2313                 if (!ring || ring->no_scheduler)
2314                         continue;
2315
2316                 switch (ring->funcs->type) {
2317                 case AMDGPU_RING_TYPE_GFX:
2318                         timeout = adev->gfx_timeout;
2319                         break;
2320                 case AMDGPU_RING_TYPE_COMPUTE:
2321                         timeout = adev->compute_timeout;
2322                         break;
2323                 case AMDGPU_RING_TYPE_SDMA:
2324                         timeout = adev->sdma_timeout;
2325                         break;
2326                 default:
2327                         timeout = adev->video_timeout;
2328                         break;
2329                 }
2330
2331                 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2332                                    ring->num_hw_submission, amdgpu_job_hang_limit,
2333                                    timeout, adev->reset_domain->wq,
2334                                    ring->sched_score, ring->name,
2335                                    adev->dev);
2336                 if (r) {
2337                         DRM_ERROR("Failed to create scheduler on ring %s.\n",
2338                                   ring->name);
2339                         return r;
2340                 }
2341         }
2342
2343         return 0;
2344 }
2345
2346
2347 /**
2348  * amdgpu_device_ip_init - run init for hardware IPs
2349  *
2350  * @adev: amdgpu_device pointer
2351  *
2352  * Main initialization pass for hardware IPs.  The list of all the hardware
2353  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2354  * are run.  sw_init initializes the software state associated with each IP
2355  * and hw_init initializes the hardware associated with each IP.
2356  * Returns 0 on success, negative error code on failure.
2357  */
2358 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2359 {
2360         int i, r;
2361
2362         r = amdgpu_ras_init(adev);
2363         if (r)
2364                 return r;
2365
2366         for (i = 0; i < adev->num_ip_blocks; i++) {
2367                 if (!adev->ip_blocks[i].status.valid)
2368                         continue;
2369                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2370                 if (r) {
2371                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2372                                   adev->ip_blocks[i].version->funcs->name, r);
2373                         goto init_failed;
2374                 }
2375                 adev->ip_blocks[i].status.sw = true;
2376
2377                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2378                         /* need to do common hw init early so everything is set up for gmc */
2379                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2380                         if (r) {
2381                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2382                                 goto init_failed;
2383                         }
2384                         adev->ip_blocks[i].status.hw = true;
2385                 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2386                         /* need to do gmc hw init early so we can allocate gpu mem */
2387                         /* Try to reserve bad pages early */
2388                         if (amdgpu_sriov_vf(adev))
2389                                 amdgpu_virt_exchange_data(adev);
2390
2391                         r = amdgpu_device_mem_scratch_init(adev);
2392                         if (r) {
2393                                 DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
2394                                 goto init_failed;
2395                         }
2396                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2397                         if (r) {
2398                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2399                                 goto init_failed;
2400                         }
2401                         r = amdgpu_device_wb_init(adev);
2402                         if (r) {
2403                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2404                                 goto init_failed;
2405                         }
2406                         adev->ip_blocks[i].status.hw = true;
2407
2408                         /* right after GMC hw init, we create CSA */
2409                         if (amdgpu_mcbp) {
2410                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2411                                                                AMDGPU_GEM_DOMAIN_VRAM |
2412                                                                AMDGPU_GEM_DOMAIN_GTT,
2413                                                                AMDGPU_CSA_SIZE);
2414                                 if (r) {
2415                                         DRM_ERROR("allocate CSA failed %d\n", r);
2416                                         goto init_failed;
2417                                 }
2418                         }
2419                 }
2420         }
2421
2422         if (amdgpu_sriov_vf(adev))
2423                 amdgpu_virt_init_data_exchange(adev);
2424
2425         r = amdgpu_ib_pool_init(adev);
2426         if (r) {
2427                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2428                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2429                 goto init_failed;
2430         }
2431
2432         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init is complete */
2433         if (r)
2434                 goto init_failed;
2435
2436         r = amdgpu_device_ip_hw_init_phase1(adev);
2437         if (r)
2438                 goto init_failed;
2439
2440         r = amdgpu_device_fw_loading(adev);
2441         if (r)
2442                 goto init_failed;
2443
2444         r = amdgpu_device_ip_hw_init_phase2(adev);
2445         if (r)
2446                 goto init_failed;
2447
2448         /*
2449          * retired pages will be loaded from eeprom and reserved here,
2450          * it should be called after amdgpu_device_ip_hw_init_phase2 since
2451          * for some ASICs the RAS EEPROM code relies on the SMU being fully
2452          * functional for I2C communication, which is only true at this point.
2453          *
2454          * amdgpu_ras_recovery_init may fail, but the upper layer only cares
2455          * about failures caused by a bad gpu situation and stops the amdgpu
2456          * init process accordingly. For other failure cases it will still
2457          * release all the resources and print an error message, rather than
2458          * returning a negative value to the upper level.
2459          *
2460          * Note: theoretically, this should be called before all vram allocations
2461          * to protect retired pages from being abused.
2462          */
2463         r = amdgpu_ras_recovery_init(adev);
2464         if (r)
2465                 goto init_failed;
2466
2467         /**
2468          * In case of XGMI grab extra reference for reset domain for this device
2469          */
2470         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2471                 if (amdgpu_xgmi_add_device(adev) == 0) {
2472                         if (!amdgpu_sriov_vf(adev)) {
2473                                 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2474
2475                                 if (WARN_ON(!hive)) {
2476                                         r = -ENOENT;
2477                                         goto init_failed;
2478                                 }
2479
2480                                 if (!hive->reset_domain ||
2481                                     !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2482                                         r = -ENOENT;
2483                                         amdgpu_put_xgmi_hive(hive);
2484                                         goto init_failed;
2485                                 }
2486
2487                                 /* Drop the early temporary reset domain we created for device */
2488                                 amdgpu_reset_put_reset_domain(adev->reset_domain);
2489                                 adev->reset_domain = hive->reset_domain;
2490                                 amdgpu_put_xgmi_hive(hive);
2491                         }
2492                 }
2493         }
2494
2495         r = amdgpu_device_init_schedulers(adev);
2496         if (r)
2497                 goto init_failed;
2498
2499         /* Don't init kfd if the whole hive needs to be reset during init */
2500         if (!adev->gmc.xgmi.pending_reset)
2501                 amdgpu_amdkfd_device_init(adev);
2502
2503         amdgpu_fru_get_product_info(adev);
2504
2505 init_failed:
2506         if (amdgpu_sriov_vf(adev))
2507                 amdgpu_virt_release_full_gpu(adev, true);
2508
2509         return r;
2510 }
2511
2512 /**
2513  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2514  *
2515  * @adev: amdgpu_device pointer
2516  *
2517  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2518  * this function before a GPU reset.  If the value is retained after a
2519  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2520  */
2521 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2522 {
2523         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2524 }
2525
2526 /**
2527  * amdgpu_device_check_vram_lost - check if vram is valid
2528  *
2529  * @adev: amdgpu_device pointer
2530  *
2531  * Checks the reset magic value written to the gart pointer in VRAM.
2532  * The driver calls this after a GPU reset to see if the contents of
2533  * VRAM are lost or not.
2534  * returns true if vram is lost, false if not.
2535  */
2536 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2537 {
2538         if (memcmp(adev->gart.ptr, adev->reset_magic,
2539                         AMDGPU_RESET_MAGIC_NUM))
2540                 return true;
2541
2542         if (!amdgpu_in_reset(adev))
2543                 return false;
2544
2545         /*
2546          * For all ASICs with baco/mode1 reset, the VRAM is
2547          * always assumed to be lost.
2548          */
2549         switch (amdgpu_asic_reset_method(adev)) {
2550         case AMD_RESET_METHOD_BACO:
2551         case AMD_RESET_METHOD_MODE1:
2552                 return true;
2553         default:
2554                 return false;
2555         }
2556 }
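
/*
 * Sketch of how the two helpers above pair up around a reset (simplified,
 * assumed flow; the real reset path lives elsewhere in this driver):
 *
 *	amdgpu_device_fill_reset_magic(adev);
 *	... perform the ASIC reset ...
 *	if (amdgpu_device_check_vram_lost(adev))
 *		restore or re-upload VRAM contents
 */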
2557
2558 /**
2559  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2560  *
2561  * @adev: amdgpu_device pointer
2562  * @state: clockgating state (gate or ungate)
2563  *
2564  * The list of all the hardware IPs that make up the asic is walked and the
2565  * set_clockgating_state callbacks are run.
2566  * On the late init pass this enables clockgating for hardware IPs;
2567  * on the fini or suspend pass it disables clockgating.
2568  * Returns 0 on success, negative error code on failure.
2569  */
2570
2571 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2572                                enum amd_clockgating_state state)
2573 {
2574         int i, j, r;
2575
2576         if (amdgpu_emu_mode == 1)
2577                 return 0;
2578
2579         for (j = 0; j < adev->num_ip_blocks; j++) {
2580                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2581                 if (!adev->ip_blocks[i].status.late_initialized)
2582                         continue;
2583                 /* skip CG for GFX, SDMA on S0ix */
2584                 if (adev->in_s0ix &&
2585                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2586                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2587                         continue;
2588                 /* skip CG for VCE/UVD, it's handled specially */
2589                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2590                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2591                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2592                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2593                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2594                         /* enable clockgating to save power */
2595                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2596                                                                                      state);
2597                         if (r) {
2598                                 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2599                                           adev->ip_blocks[i].version->funcs->name, r);
2600                                 return r;
2601                         }
2602                 }
2603         }
2604
2605         return 0;
2606 }
2607
2608 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2609                                enum amd_powergating_state state)
2610 {
2611         int i, j, r;
2612
2613         if (amdgpu_emu_mode == 1)
2614                 return 0;
2615
2616         for (j = 0; j < adev->num_ip_blocks; j++) {
2617                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2618                 if (!adev->ip_blocks[i].status.late_initialized)
2619                         continue;
2620                 /* skip PG for GFX, SDMA on S0ix */
2621                 if (adev->in_s0ix &&
2622                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2623                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2624                         continue;
2625                 /* skip PG for VCE/UVD, it's handled specially */
2626                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2627                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2628                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2629                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2630                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2631                         /* enable powergating to save power */
2632                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2633                                                                                         state);
2634                         if (r) {
2635                                 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2636                                           adev->ip_blocks[i].version->funcs->name, r);
2637                                 return r;
2638                         }
2639                 }
2640         }
2641         return 0;
2642 }
2643
2644 static int amdgpu_device_enable_mgpu_fan_boost(void)
2645 {
2646         struct amdgpu_gpu_instance *gpu_ins;
2647         struct amdgpu_device *adev;
2648         int i, ret = 0;
2649
2650         mutex_lock(&mgpu_info.mutex);
2651
2652         /*
2653          * MGPU fan boost feature should be enabled
2654          * only when there are two or more dGPUs in
2655          * the system
2656          */
2657         if (mgpu_info.num_dgpu < 2)
2658                 goto out;
2659
2660         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2661                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2662                 adev = gpu_ins->adev;
2663                 if (!(adev->flags & AMD_IS_APU) &&
2664                     !gpu_ins->mgpu_fan_enabled) {
2665                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2666                         if (ret)
2667                                 break;
2668
2669                         gpu_ins->mgpu_fan_enabled = 1;
2670                 }
2671         }
2672
2673 out:
2674         mutex_unlock(&mgpu_info.mutex);
2675
2676         return ret;
2677 }
2678
2679 /**
2680  * amdgpu_device_ip_late_init - run late init for hardware IPs
2681  *
2682  * @adev: amdgpu_device pointer
2683  *
2684  * Late initialization pass for hardware IPs.  The list of all the hardware
2685  * IPs that make up the asic is walked and the late_init callbacks are run.
2686  * late_init covers any special initialization that an IP requires
2687  * after all of the IPs have been initialized or something that needs to happen
2688  * late in the init process.
2689  * Returns 0 on success, negative error code on failure.
2690  */
2691 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2692 {
2693         struct amdgpu_gpu_instance *gpu_instance;
2694         int i = 0, r;
2695
2696         for (i = 0; i < adev->num_ip_blocks; i++) {
2697                 if (!adev->ip_blocks[i].status.hw)
2698                         continue;
2699                 if (adev->ip_blocks[i].version->funcs->late_init) {
2700                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2701                         if (r) {
2702                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2703                                           adev->ip_blocks[i].version->funcs->name, r);
2704                                 return r;
2705                         }
2706                 }
2707                 adev->ip_blocks[i].status.late_initialized = true;
2708         }
2709
2710         r = amdgpu_ras_late_init(adev);
2711         if (r) {
2712                 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2713                 return r;
2714         }
2715
2716         amdgpu_ras_set_error_query_ready(adev, true);
2717
2718         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2719         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2720
2721         amdgpu_device_fill_reset_magic(adev);
2722
2723         r = amdgpu_device_enable_mgpu_fan_boost();
2724         if (r)
2725                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2726
2727         /* For passthrough configuration on arcturus and aldebaran, enable special handling for SBR */
2728         if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2729                                adev->asic_type == CHIP_ALDEBARAN))
2730                 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2731
2732         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2733                 mutex_lock(&mgpu_info.mutex);
2734
2735                 /*
2736                  * Reset device p-state to low as this was booted with high.
2737                  *
2738                  * This should be performed only after all devices from the same
2739                  * hive get initialized.
2740                  *
2741          * However, the number of devices in the hive is not known in advance;
2742          * it is counted one by one as the devices initialize.
2743          *
2744          * So, we wait for all XGMI interlinked devices to be initialized.
2745                  * This may bring some delays as those devices may come from
2746                  * different hives. But that should be OK.
2747                  */
2748                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2749                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2750                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2751                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2752                                         continue;
2753
2754                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2755                                                 AMDGPU_XGMI_PSTATE_MIN);
2756                                 if (r) {
2757                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2758                                         break;
2759                                 }
2760                         }
2761                 }
2762
2763                 mutex_unlock(&mgpu_info.mutex);
2764         }
2765
2766         return 0;
2767 }
2768
2769 /**
2770  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2771  *
2772  * @adev: amdgpu_device pointer
2773  *
2774  * For ASICs that need to disable SMC first
2775  */
2776 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2777 {
2778         int i, r;
2779
2780         if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2781                 return;
2782
2783         for (i = 0; i < adev->num_ip_blocks; i++) {
2784                 if (!adev->ip_blocks[i].status.hw)
2785                         continue;
2786                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2787                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2788                         /* XXX handle errors */
2789                         if (r) {
2790                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2791                                           adev->ip_blocks[i].version->funcs->name, r);
2792                         }
2793                         adev->ip_blocks[i].status.hw = false;
2794                         break;
2795                 }
2796         }
2797 }
2798
2799 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2800 {
2801         int i, r;
2802
2803         for (i = 0; i < adev->num_ip_blocks; i++) {
2804                 if (!adev->ip_blocks[i].version->funcs->early_fini)
2805                         continue;
2806
2807                 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2808                 if (r) {
2809                         DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2810                                   adev->ip_blocks[i].version->funcs->name, r);
2811                 }
2812         }
2813
2814         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2815         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2816
2817         amdgpu_amdkfd_suspend(adev, false);
2818
2819         /* Workaround for ASICs that need to disable SMC first */
2820         amdgpu_device_smu_fini_early(adev);
2821
2822         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2823                 if (!adev->ip_blocks[i].status.hw)
2824                         continue;
2825
2826                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2827                 /* XXX handle errors */
2828                 if (r) {
2829                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2830                                   adev->ip_blocks[i].version->funcs->name, r);
2831                 }
2832
2833                 adev->ip_blocks[i].status.hw = false;
2834         }
2835
2836         if (amdgpu_sriov_vf(adev)) {
2837                 if (amdgpu_virt_release_full_gpu(adev, false))
2838                         DRM_ERROR("failed to release exclusive mode on fini\n");
2839         }
2840
2841         return 0;
2842 }
2843
2844 /**
2845  * amdgpu_device_ip_fini - run fini for hardware IPs
2846  *
2847  * @adev: amdgpu_device pointer
2848  *
2849  * Main teardown pass for hardware IPs.  The list of all the hardware
2850  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2851  * are run.  hw_fini tears down the hardware associated with each IP
2852  * and sw_fini tears down any software state associated with each IP.
2853  * Returns 0 on success, negative error code on failure.
2854  */
2855 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2856 {
2857         int i, r;
2858
2859         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2860                 amdgpu_virt_release_ras_err_handler_data(adev);
2861
2862         if (adev->gmc.xgmi.num_physical_nodes > 1)
2863                 amdgpu_xgmi_remove_device(adev);
2864
2865         amdgpu_amdkfd_device_fini_sw(adev);
2866
2867         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2868                 if (!adev->ip_blocks[i].status.sw)
2869                         continue;
2870
2871                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2872                         amdgpu_ucode_free_bo(adev);
2873                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2874                         amdgpu_device_wb_fini(adev);
2875                         amdgpu_device_mem_scratch_fini(adev);
2876                         amdgpu_ib_pool_fini(adev);
2877                 }
2878
2879                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2880                 /* XXX handle errors */
2881                 if (r) {
2882                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2883                                   adev->ip_blocks[i].version->funcs->name, r);
2884                 }
2885                 adev->ip_blocks[i].status.sw = false;
2886                 adev->ip_blocks[i].status.valid = false;
2887         }
2888
2889         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2890                 if (!adev->ip_blocks[i].status.late_initialized)
2891                         continue;
2892                 if (adev->ip_blocks[i].version->funcs->late_fini)
2893                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2894                 adev->ip_blocks[i].status.late_initialized = false;
2895         }
2896
2897         amdgpu_ras_fini(adev);
2898
2899         return 0;
2900 }
2901
2902 /**
2903  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2904  *
2905  * @work: work_struct.
2906  */
2907 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2908 {
2909         struct amdgpu_device *adev =
2910                 container_of(work, struct amdgpu_device, delayed_init_work.work);
2911         int r;
2912
2913         r = amdgpu_ib_ring_tests(adev);
2914         if (r)
2915                 DRM_ERROR("ib ring test failed (%d).\n", r);
2916 }
2917
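/*
 * The delayed work below is the re-enable side of the GFXOFF bookkeeping: it
 * is only expected to run once every GFXOFF disable request has been dropped
 * (gfx_off_req_count == 0) and GFXOFF is not already engaged, hence the two
 * WARN_ON_ONCE checks.  On success it asks the SMU to power-gate the GFX
 * block and records that in adev->gfx.gfx_off_state.  The work is normally
 * (re)scheduled from the GFXOFF control path elsewhere in the driver
 * (presumably amdgpu_gfx_off_ctrl(); that helper is not defined in this file).
 */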
2918 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2919 {
2920         struct amdgpu_device *adev =
2921                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2922
2923         WARN_ON_ONCE(adev->gfx.gfx_off_state);
2924         WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2925
2926         if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2927                 adev->gfx.gfx_off_state = true;
2928 }
2929
2930 /**
2931  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2932  *
2933  * @adev: amdgpu_device pointer
2934  *
2935  * Main suspend function for hardware IPs.  The list of all the hardware
2936  * IPs that make up the asic is walked, clockgating is disabled and the
2937  * suspend callbacks are run for the display (DCE) blocks.  suspend puts the
2938  * hardware and software state in each IP into a state suitable for suspend.
2939  * Returns 0 on success, negative error code on failure.
2940  */
2941 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2942 {
2943         int i, r;
2944
2945         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2946         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2947
2948         /*
2949          * Per PMFW team's suggestion, driver needs to handle gfxoff
2950          * and df cstate features disablement for gpu reset(e.g. Mode1Reset)
2951          * scenario. Add the missing df cstate disablement here.
2952          */
2953         if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
2954                 dev_warn(adev->dev, "Failed to disallow df cstate");
2955
2956         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2957                 if (!adev->ip_blocks[i].status.valid)
2958                         continue;
2959
2960                 /* displays are handled separately */
2961                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2962                         continue;
2963
2964                 /* XXX handle errors */
2965                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2966                 /* XXX handle errors */
2967                 if (r) {
2968                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2969                                   adev->ip_blocks[i].version->funcs->name, r);
2970                         return r;
2971                 }
2972
2973                 adev->ip_blocks[i].status.hw = false;
2974         }
2975
2976         return 0;
2977 }
2978
2979 /**
2980  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2981  *
2982  * @adev: amdgpu_device pointer
2983  *
2984  * Main suspend function for hardware IPs.  The list of all the hardware
2985  * IPs that make up the asic is walked and the suspend callbacks are run for
2986  * all blocks except the display (DCE) blocks (handled in phase 1).  suspend
2987  * puts the hardware and software state in each IP into a state suitable for suspend.
2988  * Returns 0 on success, negative error code on failure.
2989  */
2990 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2991 {
2992         int i, r;
2993
2994         if (adev->in_s0ix)
2995                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2996
2997         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2998                 if (!adev->ip_blocks[i].status.valid)
2999                         continue;
3000                 /* displays are handled in phase1 */
3001                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3002                         continue;
3003                 /* PSP lost connection when err_event_athub occurs */
3004                 if (amdgpu_ras_intr_triggered() &&
3005                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3006                         adev->ip_blocks[i].status.hw = false;
3007                         continue;
3008                 }
3009
3010                 /* skip unnecessary suspend for blocks that have not been initialized yet */
3011                 if (adev->gmc.xgmi.pending_reset &&
3012                     !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3013                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3014                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3015                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3016                         adev->ip_blocks[i].status.hw = false;
3017                         continue;
3018                 }
3019
3020                 /* skip suspend of gfx/mes and psp for S0ix
3021                  * gfx is in gfxoff state, so on resume it will exit gfxoff just
3022                  * like at runtime. PSP is also part of the always on hardware
3023                  * so no need to suspend it.
3024                  */
3025                 if (adev->in_s0ix &&
3026                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3027                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3028                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3029                         continue;
3030
3031                 /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3032                 if (adev->in_s0ix &&
3033                     (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) &&
3034                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
3035                         continue;
3036
3037                 /* XXX handle errors */
3038                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3039                 /* XXX handle errors */
3040                 if (r) {
3041                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
3042                                   adev->ip_blocks[i].version->funcs->name, r);
3043                 }
3044                 adev->ip_blocks[i].status.hw = false;
3045                 /* handle putting the SMC in the appropriate state */
3046                 if (!amdgpu_sriov_vf(adev)) {
3047                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3048                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3049                                 if (r) {
3050                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3051                                                         adev->mp1_state, r);
3052                                         return r;
3053                                 }
3054                         }
3055                 }
3056         }
3057
3058         return 0;
3059 }
3060
3061 /**
3062  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3063  *
3064  * @adev: amdgpu_device pointer
3065  *
3066  * Main suspend function for hardware IPs.  The list of all the hardware
3067  * IPs that make up the asic is walked, clockgating is disabled and the
3068  * suspend callbacks are run.  suspend puts the hardware and software state
3069  * in each IP into a state suitable for suspend.
3070  * Returns 0 on success, negative error code on failure.
3071  */
3072 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3073 {
3074         int r;
3075
3076         if (amdgpu_sriov_vf(adev)) {
3077                 amdgpu_virt_fini_data_exchange(adev);
3078                 amdgpu_virt_request_full_gpu(adev, false);
3079         }
3080
3081         r = amdgpu_device_ip_suspend_phase1(adev);
3082         if (r)
3083                 return r;
3084         r = amdgpu_device_ip_suspend_phase2(adev);
3085
3086         if (amdgpu_sriov_vf(adev))
3087                 amdgpu_virt_release_full_gpu(adev, false);
3088
3089         return r;
3090 }
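
/*
 * Rough sketch of the suspend ordering implemented above:
 *
 *   phase 1: ungate power and clock gating, disallow DF C-states, then
 *            suspend only the display (DCE) blocks;
 *   phase 2: suspend every remaining block in reverse init order, with
 *            special cases for S0ix (GFX/PSP/MES and SDMA 5.x+ stay up),
 *            err_event_athub RAS interrupts (PSP is skipped) and pending
 *            XGMI resets; the SMC/MP1 state is also set here on bare metal.
 *
 * amdgpu_device_ip_suspend() wraps both phases for the SR-IOV and reset
 * paths, while amdgpu_device_suspend() further below calls them separately.
 */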
3091
3092 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3093 {
3094         int i, r;
3095
3096         static enum amd_ip_block_type ip_order[] = {
3097                 AMD_IP_BLOCK_TYPE_COMMON,
3098                 AMD_IP_BLOCK_TYPE_GMC,
3099                 AMD_IP_BLOCK_TYPE_PSP,
3100                 AMD_IP_BLOCK_TYPE_IH,
3101         };
3102
3103         for (i = 0; i < adev->num_ip_blocks; i++) {
3104                 int j;
3105                 struct amdgpu_ip_block *block;
3106
3107                 block = &adev->ip_blocks[i];
3108                 block->status.hw = false;
3109
3110                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3111
3112                         if (block->version->type != ip_order[j] ||
3113                                 !block->status.valid)
3114                                 continue;
3115
3116                         r = block->version->funcs->hw_init(adev);
3117                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3118                         if (r)
3119                                 return r;
3120                         block->status.hw = true;
3121                 }
3122         }
3123
3124         return 0;
3125 }
3126
3127 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3128 {
3129         int i, r;
3130
3131         static enum amd_ip_block_type ip_order[] = {
3132                 AMD_IP_BLOCK_TYPE_SMC,
3133                 AMD_IP_BLOCK_TYPE_DCE,
3134                 AMD_IP_BLOCK_TYPE_GFX,
3135                 AMD_IP_BLOCK_TYPE_SDMA,
3136                 AMD_IP_BLOCK_TYPE_UVD,
3137                 AMD_IP_BLOCK_TYPE_VCE,
3138                 AMD_IP_BLOCK_TYPE_VCN
3139         };
3140
3141         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3142                 int j;
3143                 struct amdgpu_ip_block *block;
3144
3145                 for (j = 0; j < adev->num_ip_blocks; j++) {
3146                         block = &adev->ip_blocks[j];
3147
3148                         if (block->version->type != ip_order[i] ||
3149                                 !block->status.valid ||
3150                                 block->status.hw)
3151                                 continue;
3152
3153                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3154                                 r = block->version->funcs->resume(adev);
3155                         else
3156                                 r = block->version->funcs->hw_init(adev);
3157
3158                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3159                         if (r)
3160                                 return r;
3161                         block->status.hw = true;
3162                 }
3163         }
3164
3165         return 0;
3166 }
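
/*
 * During SR-IOV re-init the two helpers above bring the IP blocks back in a
 * fixed order rather than in adev->ip_blocks order: the "early" pass re-inits
 * COMMON, GMC, PSP and IH, and the "late" pass then handles SMC, DCE, GFX,
 * SDMA, UVD, VCE and VCN, using the resume callback for SMC and hw_init for
 * everything else, exactly as the ip_order[] tables encode.
 */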
3167
3168 /**
3169  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3170  *
3171  * @adev: amdgpu_device pointer
3172  *
3173  * First resume function for hardware IPs.  The list of all the hardware
3174  * IPs that make up the asic is walked and the resume callbacks are run for
3175  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3176  * after a suspend and updates the software state as necessary.  This
3177  * function is also used for restoring the GPU after a GPU reset.
3178  * Returns 0 on success, negative error code on failure.
3179  */
3180 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3181 {
3182         int i, r;
3183
3184         for (i = 0; i < adev->num_ip_blocks; i++) {
3185                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3186                         continue;
3187                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3188                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3189                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3190                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3191
3192                         r = adev->ip_blocks[i].version->funcs->resume(adev);
3193                         if (r) {
3194                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
3195                                           adev->ip_blocks[i].version->funcs->name, r);
3196                                 return r;
3197                         }
3198                         adev->ip_blocks[i].status.hw = true;
3199                 }
3200         }
3201
3202         return 0;
3203 }
3204
3205 /**
3206  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3207  *
3208  * @adev: amdgpu_device pointer
3209  *
3210  * Second resume function for hardware IPs.  The list of all the hardware
3211  * IPs that make up the asic is walked and the resume callbacks are run for
3212  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3213  * functional state after a suspend and updates the software state as
3214  * necessary.  This function is also used for restoring the GPU after a GPU
3215  * reset.
3216  * Returns 0 on success, negative error code on failure.
3217  */
3218 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3219 {
3220         int i, r;
3221
3222         for (i = 0; i < adev->num_ip_blocks; i++) {
3223                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3224                         continue;
3225                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3226                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3227                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3228                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3229                         continue;
3230                 r = adev->ip_blocks[i].version->funcs->resume(adev);
3231                 if (r) {
3232                         DRM_ERROR("resume of IP block <%s> failed %d\n",
3233                                   adev->ip_blocks[i].version->funcs->name, r);
3234                         return r;
3235                 }
3236                 adev->ip_blocks[i].status.hw = true;
3237         }
3238
3239         return 0;
3240 }
3241
3242 /**
3243  * amdgpu_device_ip_resume - run resume for hardware IPs
3244  *
3245  * @adev: amdgpu_device pointer
3246  *
3247  * Main resume function for hardware IPs.  The hardware IPs
3248  * are split into two resume functions because they are
3249  * are also used in in recovering from a GPU reset and some additional
3250  * steps need to be take between them.  In this case (S3/S4) they are
3251  * run sequentially.
3252  * Returns 0 on success, negative error code on failure.
3253  */
3254 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3255 {
3256         int r;
3257
3258         r = amdgpu_amdkfd_resume_iommu(adev);
3259         if (r)
3260                 return r;
3261
3262         r = amdgpu_device_ip_resume_phase1(adev);
3263         if (r)
3264                 return r;
3265
3266         r = amdgpu_device_fw_loading(adev);
3267         if (r)
3268                 return r;
3269
3270         r = amdgpu_device_ip_resume_phase2(adev);
3271
3272         return r;
3273 }
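
/*
 * The resume path above is effectively:
 *
 *   amdgpu_amdkfd_resume_iommu()  -> restore KFD IOMMU state
 *   ..._ip_resume_phase1()        -> COMMON, GMC, IH (plus PSP under SR-IOV)
 *   amdgpu_device_fw_loading()    -> load firmware before the remaining IPs
 *   ..._ip_resume_phase2()        -> all remaining IP blocks
 *
 * so GPU-reset recovery and S3/S4 resume share the same building blocks.
 */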
3274
3275 /**
3276  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3277  *
3278  * @adev: amdgpu_device pointer
3279  *
3280  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3281  */
3282 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3283 {
3284         if (amdgpu_sriov_vf(adev)) {
3285                 if (adev->is_atom_fw) {
3286                         if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3287                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3288                 } else {
3289                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3290                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3291                 }
3292
3293                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3294                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3295         }
3296 }
3297
3298 /**
3299  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3300  *
3301  * @asic_type: AMD asic type
3302  *
3303  * Check if there is DC (new modesetting infrastructure) support for an asic.
3304  * returns true if DC has support, false if not.
3305  */
3306 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3307 {
3308         switch (asic_type) {
3309 #ifdef CONFIG_DRM_AMDGPU_SI
3310         case CHIP_HAINAN:
3311 #endif
3312         case CHIP_TOPAZ:
3313                 /* chips with no display hardware */
3314                 return false;
3315 #if defined(CONFIG_DRM_AMD_DC)
3316         case CHIP_TAHITI:
3317         case CHIP_PITCAIRN:
3318         case CHIP_VERDE:
3319         case CHIP_OLAND:
3320                 /*
3321                  * We have systems in the wild with these ASICs that require
3322                  * LVDS and VGA support which is not supported with DC.
3323                  *
3324                  * Fallback to the non-DC driver here by default so as not to
3325                  * cause regressions.
3326                  */
3327 #if defined(CONFIG_DRM_AMD_DC_SI)
3328                 return amdgpu_dc > 0;
3329 #else
3330                 return false;
3331 #endif
3332         case CHIP_BONAIRE:
3333         case CHIP_KAVERI:
3334         case CHIP_KABINI:
3335         case CHIP_MULLINS:
3336                 /*
3337                  * We have systems in the wild with these ASICs that require
3338                  * VGA support which is not supported with DC.
3339                  *
3340                  * Fallback to the non-DC driver here by default so as not to
3341                  * cause regressions.
3342                  */
3343                 return amdgpu_dc > 0;
3344         default:
3345                 return amdgpu_dc != 0;
3346 #else
3347         default:
3348                 if (amdgpu_dc > 0)
3349                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3350                                          "but isn't supported by ASIC, ignoring\n");
3351                 return false;
3352 #endif
3353         }
3354 }
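
/*
 * The checks above key off the amdgpu.dc module parameter (amdgpu_dc).
 * Assuming the usual convention of -1 = auto (default), 0 = force the legacy
 * display path and 1 = force DC, "amdgpu_dc > 0" means DC is only used on the
 * listed SI/CIK parts when the user explicitly opts in, while
 * "amdgpu_dc != 0" enables DC by default everywhere else.  The parameter
 * itself is defined in amdgpu_drv.c, not here, so treat those semantics as an
 * assumption.
 */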
3355
3356 /**
3357  * amdgpu_device_has_dc_support - check if dc is supported
3358  *
3359  * @adev: amdgpu_device pointer
3360  *
3361  * Returns true for supported, false for not supported
3362  */
3363 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3364 {
3365         if (adev->enable_virtual_display ||
3366             (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3367                 return false;
3368
3369         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3370 }
3371
3372 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3373 {
3374         struct amdgpu_device *adev =
3375                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3376         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3377
3378         /* It's a bug to not have a hive within this function */
3379         if (WARN_ON(!hive))
3380                 return;
3381
3382         /*
3383          * Use task barrier to synchronize all xgmi reset works across the
3384          * hive. task_barrier_enter and task_barrier_exit will block
3385          * until all the threads running the xgmi reset works reach
3386          * those points. task_barrier_full will do both blocks.
3387          */
3388         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3389
3390                 task_barrier_enter(&hive->tb);
3391                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3392
3393                 if (adev->asic_reset_res)
3394                         goto fail;
3395
3396                 task_barrier_exit(&hive->tb);
3397                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3398
3399                 if (adev->asic_reset_res)
3400                         goto fail;
3401
3402                 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3403                     adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3404                         adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3405         } else {
3406
3407                 task_barrier_full(&hive->tb);
3408                 adev->asic_reset_res = amdgpu_asic_reset(adev);
3409         }
3410
3411 fail:
3412         if (adev->asic_reset_res)
3413                 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3414                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3415         amdgpu_put_xgmi_hive(hive);
3416 }
3417
3418 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3419 {
3420         char *input = amdgpu_lockup_timeout;
3421         char *timeout_setting = NULL;
3422         int index = 0;
3423         long timeout;
3424         int ret = 0;
3425
3426         /*
3427          * By default the timeout for non-compute jobs is 10000 ms
3428          * and 60000 ms for compute jobs.
3429          * In SR-IOV or passthrough mode, the timeout for compute
3430          * jobs is 60000 ms by default.
3431          */
3432         adev->gfx_timeout = msecs_to_jiffies(10000);
3433         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3434         if (amdgpu_sriov_vf(adev))
3435                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3436                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3437         else
3438                 adev->compute_timeout = msecs_to_jiffies(60000);
3439
3440         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3441                 while ((timeout_setting = strsep(&input, ",")) &&
3442                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3443                         ret = kstrtol(timeout_setting, 0, &timeout);
3444                         if (ret)
3445                                 return ret;
3446
3447                         if (timeout == 0) {
3448                                 index++;
3449                                 continue;
3450                         } else if (timeout < 0) {
3451                                 timeout = MAX_SCHEDULE_TIMEOUT;
3452                                 dev_warn(adev->dev, "lockup timeout disabled");
3453                                 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3454                         } else {
3455                                 timeout = msecs_to_jiffies(timeout);
3456                         }
3457
3458                         switch (index++) {
3459                         case 0:
3460                                 adev->gfx_timeout = timeout;
3461                                 break;
3462                         case 1:
3463                                 adev->compute_timeout = timeout;
3464                                 break;
3465                         case 2:
3466                                 adev->sdma_timeout = timeout;
3467                                 break;
3468                         case 3:
3469                                 adev->video_timeout = timeout;
3470                                 break;
3471                         default:
3472                                 break;
3473                         }
3474                 }
3475                 /*
3476                  * There is only one value specified and
3477                  * it should apply to all non-compute jobs.
3478                  */
3479                 if (index == 1) {
3480                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3481                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3482                                 adev->compute_timeout = adev->gfx_timeout;
3483                 }
3484         }
3485
3486         return ret;
3487 }
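
/*
 * A hypothetical example of how the parser above consumes the
 * amdgpu.lockup_timeout module parameter:
 *
 *   amdgpu.lockup_timeout=10000,60000,10000,10000
 *
 * assigns, per the switch on "index", gfx_timeout, compute_timeout,
 * sdma_timeout and video_timeout in that order (values in ms, converted with
 * msecs_to_jiffies; 0 keeps the default, a negative value disables the
 * timeout).  A single value, e.g.
 *
 *   amdgpu.lockup_timeout=10000
 *
 * applies to all non-compute queues, and also to compute under SR-IOV or
 * passthrough, per the "index == 1" branch.
 */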
3488
3489 /**
3490  * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3491  *
3492  * @adev: amdgpu_device pointer
3493  *
3494  * RAM is directly mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3495  */
3496 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3497 {
3498         struct iommu_domain *domain;
3499
3500         domain = iommu_get_domain_for_dev(adev->dev);
3501         if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3502                 adev->ram_is_direct_mapped = true;
3503 }
3504
3505 static const struct attribute *amdgpu_dev_attributes[] = {
3506         &dev_attr_product_name.attr,
3507         &dev_attr_product_number.attr,
3508         &dev_attr_serial_number.attr,
3509         &dev_attr_pcie_replay_count.attr,
3510         NULL
3511 };
3512
3513 /**
3514  * amdgpu_device_init - initialize the driver
3515  *
3516  * @adev: amdgpu_device pointer
3517  * @flags: driver flags
3518  *
3519  * Initializes the driver info and hw (all asics).
3520  * Returns 0 for success or an error on failure.
3521  * Called at driver startup.
3522  */
3523 int amdgpu_device_init(struct amdgpu_device *adev,
3524                        uint32_t flags)
3525 {
3526         struct drm_device *ddev = adev_to_drm(adev);
3527         struct pci_dev *pdev = adev->pdev;
3528         int r, i;
3529         bool px = false;
3530         u32 max_MBps;
3531
3532         adev->shutdown = false;
3533         adev->flags = flags;
3534
3535         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3536                 adev->asic_type = amdgpu_force_asic_type;
3537         else
3538                 adev->asic_type = flags & AMD_ASIC_MASK;
3539
3540         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3541         if (amdgpu_emu_mode == 1)
3542                 adev->usec_timeout *= 10;
3543         adev->gmc.gart_size = 512 * 1024 * 1024;
3544         adev->accel_working = false;
3545         adev->num_rings = 0;
3546         RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3547         adev->mman.buffer_funcs = NULL;
3548         adev->mman.buffer_funcs_ring = NULL;
3549         adev->vm_manager.vm_pte_funcs = NULL;
3550         adev->vm_manager.vm_pte_num_scheds = 0;
3551         adev->gmc.gmc_funcs = NULL;
3552         adev->harvest_ip_mask = 0x0;
3553         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3554         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3555
3556         adev->smc_rreg = &amdgpu_invalid_rreg;
3557         adev->smc_wreg = &amdgpu_invalid_wreg;
3558         adev->pcie_rreg = &amdgpu_invalid_rreg;
3559         adev->pcie_wreg = &amdgpu_invalid_wreg;
3560         adev->pciep_rreg = &amdgpu_invalid_rreg;
3561         adev->pciep_wreg = &amdgpu_invalid_wreg;
3562         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3563         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3564         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3565         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3566         adev->didt_rreg = &amdgpu_invalid_rreg;
3567         adev->didt_wreg = &amdgpu_invalid_wreg;
3568         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3569         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3570         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3571         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3572
3573         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3574                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3575                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3576
3577         /* mutex initialization is all done here so these
3578          * functions can be called without locking issues */
3579         mutex_init(&adev->firmware.mutex);
3580         mutex_init(&adev->pm.mutex);
3581         mutex_init(&adev->gfx.gpu_clock_mutex);
3582         mutex_init(&adev->srbm_mutex);
3583         mutex_init(&adev->gfx.pipe_reserve_mutex);
3584         mutex_init(&adev->gfx.gfx_off_mutex);
3585         mutex_init(&adev->grbm_idx_mutex);
3586         mutex_init(&adev->mn_lock);
3587         mutex_init(&adev->virt.vf_errors.lock);
3588         hash_init(adev->mn_hash);
3589         mutex_init(&adev->psp.mutex);
3590         mutex_init(&adev->notifier_lock);
3591         mutex_init(&adev->pm.stable_pstate_ctx_lock);
3592         mutex_init(&adev->benchmark_mutex);
3593
3594         amdgpu_device_init_apu_flags(adev);
3595
3596         r = amdgpu_device_check_arguments(adev);
3597         if (r)
3598                 return r;
3599
3600         spin_lock_init(&adev->mmio_idx_lock);
3601         spin_lock_init(&adev->smc_idx_lock);
3602         spin_lock_init(&adev->pcie_idx_lock);
3603         spin_lock_init(&adev->uvd_ctx_idx_lock);
3604         spin_lock_init(&adev->didt_idx_lock);
3605         spin_lock_init(&adev->gc_cac_idx_lock);
3606         spin_lock_init(&adev->se_cac_idx_lock);
3607         spin_lock_init(&adev->audio_endpt_idx_lock);
3608         spin_lock_init(&adev->mm_stats.lock);
3609
3610         INIT_LIST_HEAD(&adev->shadow_list);
3611         mutex_init(&adev->shadow_list_lock);
3612
3613         INIT_LIST_HEAD(&adev->reset_list);
3614
3615         INIT_LIST_HEAD(&adev->ras_list);
3616
3617         INIT_DELAYED_WORK(&adev->delayed_init_work,
3618                           amdgpu_device_delayed_init_work_handler);
3619         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3620                           amdgpu_device_delay_enable_gfx_off);
3621
3622         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3623
3624         adev->gfx.gfx_off_req_count = 1;
3625         adev->gfx.gfx_off_residency = 0;
3626         adev->gfx.gfx_off_entrycount = 0;
3627         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3628
3629         atomic_set(&adev->throttling_logging_enabled, 1);
3630         /*
3631          * If throttling continues, logging will be performed every minute
3632          * to avoid log flooding. "-1" is subtracted since the thermal
3633          * throttling interrupt comes every second. Thus, the total logging
3634          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3635          * for throttling interrupt) = 60 seconds.
3636          */
3637         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3638         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3639
3640         /* Registers mapping */
3641         /* TODO: block userspace mapping of io register */
3642         if (adev->asic_type >= CHIP_BONAIRE) {
3643                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3644                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3645         } else {
3646                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3647                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3648         }
3649
3650         for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3651                 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3652
3653         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3654         if (adev->rmmio == NULL) {
3655                 return -ENOMEM;
3656         }
3657         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3658         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3659
3660         amdgpu_device_get_pcie_info(adev);
3661
3662         if (amdgpu_mcbp)
3663                 DRM_INFO("MCBP is enabled\n");
3664
3665         /*
3666          * The reset domain needs to be present early, before the XGMI hive is
3667          * discovered (if any) and initialized, so the reset sem and in_gpu reset
3668          * flag can be used early on during init and before calling RREG32.
3669          */
3670         adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3671         if (!adev->reset_domain)
3672                 return -ENOMEM;
3673
3674         /* detect hw virtualization here */
3675         amdgpu_detect_virtualization(adev);
3676
3677         r = amdgpu_device_get_job_timeout_settings(adev);
3678         if (r) {
3679                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3680                 return r;
3681         }
3682
3683         /* early init functions */
3684         r = amdgpu_device_ip_early_init(adev);
3685         if (r)
3686                 return r;
3687
3688         /* Get rid of things like offb */
3689         r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
3690         if (r)
3691                 return r;
3692
3693         /* Enable TMZ based on IP_VERSION */
3694         amdgpu_gmc_tmz_set(adev);
3695
3696         amdgpu_gmc_noretry_set(adev);
3697         /* Need to get xgmi info early to decide the reset behavior */
3698         if (adev->gmc.xgmi.supported) {
3699                 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3700                 if (r)
3701                         return r;
3702         }
3703
3704         /* enable PCIE atomic ops */
3705         if (amdgpu_sriov_vf(adev))
3706                 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3707                         adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3708                         (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3709         else
3710                 adev->have_atomics_support =
3711                         !pci_enable_atomic_ops_to_root(adev->pdev,
3712                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3713                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3714         if (!adev->have_atomics_support)
3715                 dev_info(adev->dev, "PCIE atomic ops is not supported\n");
3716
3717         /* doorbell bar mapping and doorbell index init */
3718         amdgpu_device_doorbell_init(adev);
3719
3720         if (amdgpu_emu_mode == 1) {
3721                 /* post the asic on emulation mode */
3722                 emu_soc_asic_init(adev);
3723                 goto fence_driver_init;
3724         }
3725
3726         amdgpu_reset_init(adev);
3727
3728         /* detect whether we have an SR-IOV vBIOS */
3729         amdgpu_device_detect_sriov_bios(adev);
3730
3731         /* check if we need to reset the asic
3732          *  E.g., driver was not cleanly unloaded previously, etc.
3733          */
3734         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3735                 if (adev->gmc.xgmi.num_physical_nodes) {
3736                         dev_info(adev->dev, "Pending hive reset.\n");
3737                         adev->gmc.xgmi.pending_reset = true;
3738                         /* Only need to init necessary block for SMU to handle the reset */
3739                         for (i = 0; i < adev->num_ip_blocks; i++) {
3740                                 if (!adev->ip_blocks[i].status.valid)
3741                                         continue;
3742                                 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3743                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3744                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3745                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3746                                         DRM_DEBUG("IP %s disabled for hw_init.\n",
3747                                                 adev->ip_blocks[i].version->funcs->name);
3748                                         adev->ip_blocks[i].status.hw = true;
3749                                 }
3750                         }
3751                 } else {
3752                         r = amdgpu_asic_reset(adev);
3753                         if (r) {
3754                                 dev_err(adev->dev, "asic reset on init failed\n");
3755                                 goto failed;
3756                         }
3757                 }
3758         }
3759
3760         pci_enable_pcie_error_reporting(adev->pdev);
3761
3762         /* Post card if necessary */
3763         if (amdgpu_device_need_post(adev)) {
3764                 if (!adev->bios) {
3765                         dev_err(adev->dev, "no vBIOS found\n");
3766                         r = -EINVAL;
3767                         goto failed;
3768                 }
3769                 DRM_INFO("GPU posting now...\n");
3770                 r = amdgpu_device_asic_init(adev);
3771                 if (r) {
3772                         dev_err(adev->dev, "gpu post error!\n");
3773                         goto failed;
3774                 }
3775         }
3776
3777         if (adev->is_atom_fw) {
3778                 /* Initialize clocks */
3779                 r = amdgpu_atomfirmware_get_clock_info(adev);
3780                 if (r) {
3781                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3782                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3783                         goto failed;
3784                 }
3785         } else {
3786                 /* Initialize clocks */
3787                 r = amdgpu_atombios_get_clock_info(adev);
3788                 if (r) {
3789                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3790                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3791                         goto failed;
3792                 }
3793                 /* init i2c buses */
3794                 if (!amdgpu_device_has_dc_support(adev))
3795                         amdgpu_atombios_i2c_init(adev);
3796         }
3797
3798 fence_driver_init:
3799         /* Fence driver */
3800         r = amdgpu_fence_driver_sw_init(adev);
3801         if (r) {
3802                 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3803                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3804                 goto failed;
3805         }
3806
3807         /* init the mode config */
3808         drm_mode_config_init(adev_to_drm(adev));
3809
3810         r = amdgpu_device_ip_init(adev);
3811         if (r) {
3812                 /* failed in exclusive mode due to timeout */
3813                 if (amdgpu_sriov_vf(adev) &&
3814                     !amdgpu_sriov_runtime(adev) &&
3815                     amdgpu_virt_mmio_blocked(adev) &&
3816                     !amdgpu_virt_wait_reset(adev)) {
3817                         dev_err(adev->dev, "VF exclusive mode timeout\n");
3818                         /* Don't send request since VF is inactive. */
3819                         adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3820                         adev->virt.ops = NULL;
3821                         r = -EAGAIN;
3822                         goto release_ras_con;
3823                 }
3824                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3825                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3826                 goto release_ras_con;
3827         }
3828
3829         amdgpu_fence_driver_hw_init(adev);
3830
3831         dev_info(adev->dev,
3832                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3833                         adev->gfx.config.max_shader_engines,
3834                         adev->gfx.config.max_sh_per_se,
3835                         adev->gfx.config.max_cu_per_sh,
3836                         adev->gfx.cu_info.number);
3837
3838         adev->accel_working = true;
3839
3840         amdgpu_vm_check_compute_bug(adev);
3841
3842         /* Initialize the buffer migration limit. */
3843         if (amdgpu_moverate >= 0)
3844                 max_MBps = amdgpu_moverate;
3845         else
3846                 max_MBps = 8; /* Allow 8 MB/s. */
3847         /* Get a log2 for easy divisions. */
3848         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3849
3850         r = amdgpu_pm_sysfs_init(adev);
3851         if (r) {
3852                 adev->pm_sysfs_en = false;
3853                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3854         } else
3855                 adev->pm_sysfs_en = true;
3856
3857         r = amdgpu_ucode_sysfs_init(adev);
3858         if (r) {
3859                 adev->ucode_sysfs_en = false;
3860                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3861         } else
3862                 adev->ucode_sysfs_en = true;
3863
3864         r = amdgpu_psp_sysfs_init(adev);
3865         if (r) {
3866                 adev->psp_sysfs_en = false;
3867                 if (!amdgpu_sriov_vf(adev))
3868                         DRM_ERROR("Creating psp sysfs failed\n");
3869         } else
3870                 adev->psp_sysfs_en = true;
3871
3872         /*
3873          * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3874          * Otherwise the mgpu fan boost feature will be skipped because the
3875          * gpu instance count would be too low.
3876          */
3877         amdgpu_register_gpu_instance(adev);
3878
3879         /* enable clockgating, etc. after ib tests, etc. since some blocks require
3880          * explicit gating rather than handling it automatically.
3881          */
3882         if (!adev->gmc.xgmi.pending_reset) {
3883                 r = amdgpu_device_ip_late_init(adev);
3884                 if (r) {
3885                         dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3886                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3887                         goto release_ras_con;
3888                 }
3889                 /* must succeed. */
3890                 amdgpu_ras_resume(adev);
3891                 queue_delayed_work(system_wq, &adev->delayed_init_work,
3892                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3893         }
3894
3895         if (amdgpu_sriov_vf(adev))
3896                 flush_delayed_work(&adev->delayed_init_work);
3897
3898         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3899         if (r)
3900                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3901
3902         if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3903                 r = amdgpu_pmu_init(adev);
3904                 if (r)
3905                         dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3906         }

3907         /* Keep the stored PCI config space at hand to restore after a sudden PCI error */
3908         if (amdgpu_device_cache_pci_state(adev->pdev))
3909                 pci_restore_state(pdev);
3910
3911         /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3912         /* this will fail for cards that aren't VGA class devices, just
3913          * ignore it */
3914         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3915                 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3916
3917         if (amdgpu_device_supports_px(ddev)) {
3918                 px = true;
3919                 vga_switcheroo_register_client(adev->pdev,
3920                                                &amdgpu_switcheroo_ops, px);
3921                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3922         }
3923
3924         if (adev->gmc.xgmi.pending_reset)
3925                 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3926                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3927
3928         amdgpu_device_check_iommu_direct_map(adev);
3929
3930         return 0;
3931
3932 release_ras_con:
3933         amdgpu_release_ras_context(adev);
3934
3935 failed:
3936         amdgpu_vf_error_trans_all(adev);
3937
3938         return r;
3939 }
3940
3941 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3942 {
3943
3944         /* Clear all CPU mappings pointing to this device */
3945         unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3946
3947         /* Unmap all mapped bars - Doorbell, registers and VRAM */
3948         amdgpu_device_doorbell_fini(adev);
3949
3950         iounmap(adev->rmmio);
3951         adev->rmmio = NULL;
3952         if (adev->mman.aper_base_kaddr)
3953                 iounmap(adev->mman.aper_base_kaddr);
3954         adev->mman.aper_base_kaddr = NULL;
3955
3956         /* Memory manager related */
3957         if (!adev->gmc.xgmi.connected_to_cpu) {
3958                 arch_phys_wc_del(adev->gmc.vram_mtrr);
3959                 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3960         }
3961 }
3962
3963 /**
3964  * amdgpu_device_fini_hw - tear down the driver
3965  *
3966  * @adev: amdgpu_device pointer
3967  *
3968  * Tear down the driver info (all asics).
3969  * Called at driver shutdown.
3970  */
3971 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3972 {
3973         dev_info(adev->dev, "amdgpu: finishing device.\n");
3974         flush_delayed_work(&adev->delayed_init_work);
3975         adev->shutdown = true;
3976
3977         /* make sure the IB test has finished before entering exclusive mode
3978          * to avoid preemption on the IB test
3979          */
3980         if (amdgpu_sriov_vf(adev)) {
3981                 amdgpu_virt_request_full_gpu(adev, false);
3982                 amdgpu_virt_fini_data_exchange(adev);
3983         }
3984
3985         /* disable all interrupts */
3986         amdgpu_irq_disable_all(adev);
3987         if (adev->mode_info.mode_config_initialized) {
3988                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3989                         drm_helper_force_disable_all(adev_to_drm(adev));
3990                 else
3991                         drm_atomic_helper_shutdown(adev_to_drm(adev));
3992         }
3993         amdgpu_fence_driver_hw_fini(adev);
3994
3995         if (adev->mman.initialized)
3996                 drain_workqueue(adev->mman.bdev.wq);
3997
3998         if (adev->pm_sysfs_en)
3999                 amdgpu_pm_sysfs_fini(adev);
4000         if (adev->ucode_sysfs_en)
4001                 amdgpu_ucode_sysfs_fini(adev);
4002         if (adev->psp_sysfs_en)
4003                 amdgpu_psp_sysfs_fini(adev);
4004         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4005
4006         /* RAS features must be disabled before hw fini */
4007         amdgpu_ras_pre_fini(adev);
4008
4009         amdgpu_device_ip_fini_early(adev);
4010
4011         amdgpu_irq_fini_hw(adev);
4012
4013         if (adev->mman.initialized)
4014                 ttm_device_clear_dma_mappings(&adev->mman.bdev);
4015
4016         amdgpu_gart_dummy_page_fini(adev);
4017
4018         amdgpu_device_unmap_mmio(adev);
4019
4020 }
4021
4022 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4023 {
4024         int idx;
4025
4026         amdgpu_fence_driver_sw_fini(adev);
4027         amdgpu_device_ip_fini(adev);
4028         amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4029         adev->accel_working = false;
4030         dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4031
4032         amdgpu_reset_fini(adev);
4033
4034         /* free i2c buses */
4035         if (!amdgpu_device_has_dc_support(adev))
4036                 amdgpu_i2c_fini(adev);
4037
4038         if (amdgpu_emu_mode != 1)
4039                 amdgpu_atombios_fini(adev);
4040
4041         kfree(adev->bios);
4042         adev->bios = NULL;
4043         if (amdgpu_device_supports_px(adev_to_drm(adev))) {
4044                 vga_switcheroo_unregister_client(adev->pdev);
4045                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4046         }
4047         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4048                 vga_client_unregister(adev->pdev);
4049
4050         if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4051
4052                 iounmap(adev->rmmio);
4053                 adev->rmmio = NULL;
4054                 amdgpu_device_doorbell_fini(adev);
4055                 drm_dev_exit(idx);
4056         }
4057
4058         if (IS_ENABLED(CONFIG_PERF_EVENTS))
4059                 amdgpu_pmu_fini(adev);
4060         if (adev->mman.discovery_bin)
4061                 amdgpu_discovery_fini(adev);
4062
4063         amdgpu_reset_put_reset_domain(adev->reset_domain);
4064         adev->reset_domain = NULL;
4065
4066         kfree(adev->pci_state);
4067
4068 }
4069
4070 /**
4071  * amdgpu_device_evict_resources - evict device resources
4072  * @adev: amdgpu device object
4073  *
4074  * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4075  * of the vram memory type. Mainly used for evicting device resources
4076  * at suspend time.
4077  *
4078  */
4079 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4080 {
4081         int ret;
4082
4083         /* No need to evict vram on APUs for suspend to ram or s2idle */
4084         if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4085                 return 0;
4086
4087         ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4088         if (ret)
4089                 DRM_WARN("evicting device resources failed\n");
4090         return ret;
4091 }
4092
4093 /*
4094  * Suspend & resume.
4095  */
4096 /**
4097  * amdgpu_device_suspend - initiate device suspend
4098  *
4099  * @dev: drm dev pointer
4100  * @fbcon: notify the fbdev of suspend
4101  *
4102  * Puts the hw in the suspend state (all asics).
4103  * Returns 0 for success or an error on failure.
4104  * Called at driver suspend.
4105  */
4106 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4107 {
4108         struct amdgpu_device *adev = drm_to_adev(dev);
4109         int r = 0;
4110
4111         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4112                 return 0;
4113
4114         adev->in_suspend = true;
4115
4116         /* Evict the majority of BOs before grabbing the full access */
4117         r = amdgpu_device_evict_resources(adev);
4118         if (r)
4119                 return r;
4120
4121         if (amdgpu_sriov_vf(adev)) {
4122                 amdgpu_virt_fini_data_exchange(adev);
4123                 r = amdgpu_virt_request_full_gpu(adev, false);
4124                 if (r)
4125                         return r;
4126         }
4127
4128         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4129                 DRM_WARN("smart shift update failed\n");
4130
4131         drm_kms_helper_poll_disable(dev);
4132
4133         if (fbcon)
4134                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4135
4136         cancel_delayed_work_sync(&adev->delayed_init_work);
4137
4138         amdgpu_ras_suspend(adev);
4139
4140         amdgpu_device_ip_suspend_phase1(adev);
4141
4142         if (!adev->in_s0ix)
4143                 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4144
4145         r = amdgpu_device_evict_resources(adev);
4146         if (r)
4147                 return r;
4148
4149         amdgpu_fence_driver_hw_fini(adev);
4150
4151         amdgpu_device_ip_suspend_phase2(adev);
4152
4153         if (amdgpu_sriov_vf(adev))
4154                 amdgpu_virt_release_full_gpu(adev, false);
4155
4156         return 0;
4157 }
4158
4159 /**
4160  * amdgpu_device_resume - initiate device resume
4161  *
4162  * @dev: drm dev pointer
4163  * @fbcon: notify the fbdev of resume
4164  *
4165  * Bring the hw back to operating state (all asics).
4166  * Returns 0 for success or an error on failure.
4167  * Called at driver resume.
4168  */
4169 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4170 {
4171         struct amdgpu_device *adev = drm_to_adev(dev);
4172         int r = 0;
4173
4174         if (amdgpu_sriov_vf(adev)) {
4175                 r = amdgpu_virt_request_full_gpu(adev, true);
4176                 if (r)
4177                         return r;
4178         }
4179
4180         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4181                 return 0;
4182
4183         if (adev->in_s0ix)
4184                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4185
4186         /* post card */
4187         if (amdgpu_device_need_post(adev)) {
4188                 r = amdgpu_device_asic_init(adev);
4189                 if (r)
4190                         dev_err(adev->dev, "amdgpu asic init failed\n");
4191         }
4192
4193         r = amdgpu_device_ip_resume(adev);
4194
4195         if (r) {
4196                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4197                 goto exit;
4198         }
4199         amdgpu_fence_driver_hw_init(adev);
4200
4201         r = amdgpu_device_ip_late_init(adev);
4202         if (r)
4203                 goto exit;
4204
4205         queue_delayed_work(system_wq, &adev->delayed_init_work,
4206                            msecs_to_jiffies(AMDGPU_RESUME_MS));
4207
4208         if (!adev->in_s0ix) {
4209                 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4210                 if (r)
4211                         goto exit;
4212         }
4213
4214 exit:
4215         if (amdgpu_sriov_vf(adev)) {
4216                 amdgpu_virt_init_data_exchange(adev);
4217                 amdgpu_virt_release_full_gpu(adev, true);
4218         }
4219
4220         if (r)
4221                 return r;
4222
4223         /* Make sure IB tests flushed */
4224         flush_delayed_work(&adev->delayed_init_work);
4225
4226         if (fbcon)
4227                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4228
4229         drm_kms_helper_poll_enable(dev);
4230
4231         amdgpu_ras_resume(adev);
4232
4233         if (adev->mode_info.num_crtc) {
4234                 /*
4235                  * Most of the connector probing functions try to acquire runtime pm
4236                  * refs to ensure that the GPU is powered on when connector polling is
4237                  * performed. Since we're calling this from a runtime PM callback,
4238                  * trying to acquire rpm refs will cause us to deadlock.
4239                  *
4240                  * Since we're guaranteed to be holding the rpm lock, it's safe to
4241                  * temporarily disable the rpm helpers so this doesn't deadlock us.
4242                  */
4243 #ifdef CONFIG_PM
4244                 dev->dev->power.disable_depth++;
4245 #endif
4246                 if (!adev->dc_enabled)
4247                         drm_helper_hpd_irq_event(dev);
4248                 else
4249                         drm_kms_helper_hotplug_event(dev);
4250 #ifdef CONFIG_PM
4251                 dev->dev->power.disable_depth--;
4252 #endif
4253         }
4254         adev->in_suspend = false;
4255
4256         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4257                 DRM_WARN("smart shift update failed\n");
4258
4259         return 0;
4260 }
4261
4262 /**
4263  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4264  *
4265  * @adev: amdgpu_device pointer
4266  *
4267  * The list of all the hardware IPs that make up the asic is walked and
4268  * the check_soft_reset callbacks are run.  check_soft_reset determines
4269  * if the asic is still hung or not.
4270  * Returns true if any of the IPs are still in a hung state, false if not.
4271  */
4272 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4273 {
4274         int i;
4275         bool asic_hang = false;
4276
4277         if (amdgpu_sriov_vf(adev))
4278                 return true;
4279
4280         if (amdgpu_asic_need_full_reset(adev))
4281                 return true;
4282
4283         for (i = 0; i < adev->num_ip_blocks; i++) {
4284                 if (!adev->ip_blocks[i].status.valid)
4285                         continue;
4286                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4287                         adev->ip_blocks[i].status.hang =
4288                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4289                 if (adev->ip_blocks[i].status.hang) {
4290                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4291                         asic_hang = true;
4292                 }
4293         }
4294         return asic_hang;
4295 }
4296
4297 /**
4298  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4299  *
4300  * @adev: amdgpu_device pointer
4301  *
4302  * The list of all the hardware IPs that make up the asic is walked and the
4303  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4304  * handles any IP specific hardware or software state changes that are
4305  * necessary for a soft reset to succeed.
4306  * Returns 0 on success, negative error code on failure.
4307  */
4308 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4309 {
4310         int i, r = 0;
4311
4312         for (i = 0; i < adev->num_ip_blocks; i++) {
4313                 if (!adev->ip_blocks[i].status.valid)
4314                         continue;
4315                 if (adev->ip_blocks[i].status.hang &&
4316                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4317                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4318                         if (r)
4319                                 return r;
4320                 }
4321         }
4322
4323         return 0;
4324 }
4325
4326 /**
4327  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4328  *
4329  * @adev: amdgpu_device pointer
4330  *
4331  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4332  * reset is necessary to recover.
4333  * Returns true if a full asic reset is required, false if not.
4334  */
4335 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4336 {
4337         int i;
4338
4339         if (amdgpu_asic_need_full_reset(adev))
4340                 return true;
4341
4342         for (i = 0; i < adev->num_ip_blocks; i++) {
4343                 if (!adev->ip_blocks[i].status.valid)
4344                         continue;
4345                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4346                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4347                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4348                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4349                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4350                         if (adev->ip_blocks[i].status.hang) {
4351                         dev_info(adev->dev, "Some blocks need a full reset!\n");
4352                                 return true;
4353                         }
4354                 }
4355         }
4356         return false;
4357 }
4358
4359 /**
4360  * amdgpu_device_ip_soft_reset - do a soft reset
4361  *
4362  * @adev: amdgpu_device pointer
4363  *
4364  * The list of all the hardware IPs that make up the asic is walked and the
4365  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4366  * IP specific hardware or software state changes that are necessary to soft
4367  * reset the IP.
4368  * Returns 0 on success, negative error code on failure.
4369  */
4370 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4371 {
4372         int i, r = 0;
4373
4374         for (i = 0; i < adev->num_ip_blocks; i++) {
4375                 if (!adev->ip_blocks[i].status.valid)
4376                         continue;
4377                 if (adev->ip_blocks[i].status.hang &&
4378                     adev->ip_blocks[i].version->funcs->soft_reset) {
4379                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4380                         if (r)
4381                                 return r;
4382                 }
4383         }
4384
4385         return 0;
4386 }
4387
4388 /**
4389  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4390  *
4391  * @adev: amdgpu_device pointer
4392  *
4393  * The list of all the hardware IPs that make up the asic is walked and the
4394  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4395  * handles any IP specific hardware or software state changes that are
4396  * necessary after the IP has been soft reset.
4397  * Returns 0 on success, negative error code on failure.
4398  */
4399 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4400 {
4401         int i, r = 0;
4402
4403         for (i = 0; i < adev->num_ip_blocks; i++) {
4404                 if (!adev->ip_blocks[i].status.valid)
4405                         continue;
4406                 if (adev->ip_blocks[i].status.hang &&
4407                     adev->ip_blocks[i].version->funcs->post_soft_reset)
4408                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4409                 if (r)
4410                         return r;
4411         }
4412
4413         return 0;
4414 }
4415
4416 /**
4417  * amdgpu_device_recover_vram - Recover some VRAM contents
4418  *
4419  * @adev: amdgpu_device pointer
4420  *
4421  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4422  * restore things like GPUVM page tables after a GPU reset where
4423  * the contents of VRAM might be lost.
4424  *
4425  * Returns:
4426  * 0 on success, negative error code on failure.
4427  */
4428 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4429 {
4430         struct dma_fence *fence = NULL, *next = NULL;
4431         struct amdgpu_bo *shadow;
4432         struct amdgpu_bo_vm *vmbo;
4433         long r = 1, tmo;
4434
4435         if (amdgpu_sriov_runtime(adev))
4436                 tmo = msecs_to_jiffies(8000);
4437         else
4438                 tmo = msecs_to_jiffies(100);
4439
4440         dev_info(adev->dev, "recover vram bo from shadow start\n");
4441         mutex_lock(&adev->shadow_list_lock);
4442         list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4443                 shadow = &vmbo->bo;
4444                 /* No need to recover an evicted BO */
4445                 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4446                     shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4447                     shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4448                         continue;
4449
4450                 r = amdgpu_bo_restore_shadow(shadow, &next);
4451                 if (r)
4452                         break;
4453
4454                 if (fence) {
4455                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4456                         dma_fence_put(fence);
4457                         fence = next;
4458                         if (tmo == 0) {
4459                                 r = -ETIMEDOUT;
4460                                 break;
4461                         } else if (tmo < 0) {
4462                                 r = tmo;
4463                                 break;
4464                         }
4465                 } else {
4466                         fence = next;
4467                 }
4468         }
4469         mutex_unlock(&adev->shadow_list_lock);
4470
4471         if (fence)
4472                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4473         dma_fence_put(fence);
4474
4475         if (r < 0 || tmo <= 0) {
4476                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4477                 return -EIO;
4478         }
4479
4480         dev_info(adev->dev, "recover vram bo from shadow done\n");
4481         return 0;
4482 }
4483
4484
4485 /**
4486  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4487  *
4488  * @adev: amdgpu_device pointer
4489  * @from_hypervisor: request from hypervisor
4490  *
4491  * Do a VF FLR (function-level reset) and reinitialize the ASIC.
4492  * Returns 0 on success, negative error code on failure.
4493  */
4494 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4495                                      bool from_hypervisor)
4496 {
4497         int r;
4498         struct amdgpu_hive_info *hive = NULL;
4499         int retry_limit = 0;
4500
4501 retry:
4502         amdgpu_amdkfd_pre_reset(adev);
4503
4504         if (from_hypervisor)
4505                 r = amdgpu_virt_request_full_gpu(adev, true);
4506         else
4507                 r = amdgpu_virt_reset_gpu(adev);
4508         if (r)
4509                 return r;
4510
4511         /* Resume IP prior to SMC */
4512         r = amdgpu_device_ip_reinit_early_sriov(adev);
4513         if (r)
4514                 goto error;
4515
4516         amdgpu_virt_init_data_exchange(adev);
4517
4518         r = amdgpu_device_fw_loading(adev);
4519         if (r)
4520                 return r;
4521
4522         /* now we are okay to resume SMC/CP/SDMA */
4523         r = amdgpu_device_ip_reinit_late_sriov(adev);
4524         if (r)
4525                 goto error;
4526
4527         hive = amdgpu_get_xgmi_hive(adev);
4528         /* Update PSP FW topology after reset */
4529         if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4530                 r = amdgpu_xgmi_update_topology(hive, adev);
4531
4532         if (hive)
4533                 amdgpu_put_xgmi_hive(hive);
4534
4535         if (!r) {
4536                 amdgpu_irq_gpu_reset_resume_helper(adev);
4537                 r = amdgpu_ib_ring_tests(adev);
4538
4539                 amdgpu_amdkfd_post_reset(adev);
4540         }
4541
4542 error:
4543         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4544                 amdgpu_inc_vram_lost(adev);
4545                 r = amdgpu_device_recover_vram(adev);
4546         }
4547         amdgpu_virt_release_full_gpu(adev, true);
4548
4549         if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4550                 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4551                         retry_limit++;
4552                         goto retry;
4553                 } else
4554                         DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4555         }
4556
4557         return r;
4558 }
4559
4560 /**
4561  * amdgpu_device_has_job_running - check if there is any unfinished job
4562  *
4563  * @adev: amdgpu_device pointer
4564  *
4565  * Check whether any ring's scheduler still has a job in its pending list.
4566  */
4567 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4568 {
4569         int i;
4570         struct drm_sched_job *job;
4571
4572         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4573                 struct amdgpu_ring *ring = adev->rings[i];
4574
4575                 if (!ring || !ring->sched.thread)
4576                         continue;
4577
4578                 spin_lock(&ring->sched.job_list_lock);
4579                 job = list_first_entry_or_null(&ring->sched.pending_list,
4580                                                struct drm_sched_job, list);
4581                 spin_unlock(&ring->sched.job_list_lock);
4582                 if (job)
4583                         return true;
4584         }
4585         return false;
4586 }
4587
4588 /**
4589  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4590  *
4591  * @adev: amdgpu_device pointer
4592  *
4593  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4594  * a hung GPU.
4595  */
4596 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4597 {
4598
4599         if (amdgpu_gpu_recovery == 0)
4600                 goto disabled;
4601
4602         /* Skip soft reset check in fatal error mode */
4603         if (!amdgpu_ras_is_poison_mode_supported(adev))
4604                 return true;
4605
4606         if (amdgpu_sriov_vf(adev))
4607                 return true;
4608
4609         if (amdgpu_gpu_recovery == -1) {
4610                 switch (adev->asic_type) {
4611 #ifdef CONFIG_DRM_AMDGPU_SI
4612                 case CHIP_VERDE:
4613                 case CHIP_TAHITI:
4614                 case CHIP_PITCAIRN:
4615                 case CHIP_OLAND:
4616                 case CHIP_HAINAN:
4617 #endif
4618 #ifdef CONFIG_DRM_AMDGPU_CIK
4619                 case CHIP_KAVERI:
4620                 case CHIP_KABINI:
4621                 case CHIP_MULLINS:
4622 #endif
4623                 case CHIP_CARRIZO:
4624                 case CHIP_STONEY:
4625                 case CHIP_CYAN_SKILLFISH:
4626                         goto disabled;
4627                 default:
4628                         break;
4629                 }
4630         }
4631
4632         return true;
4633
4634 disabled:
4635         dev_info(adev->dev, "GPU recovery disabled.\n");
4636         return false;
4637 }
4638
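/**
 * amdgpu_device_mode1_reset - perform a mode1 (whole ASIC) reset
 *
 * @adev: amdgpu_device pointer
 *
 * Disables bus mastering, caches the PCI config space and triggers a mode1
 * reset through the SMU if supported, or through the PSP otherwise.  The PCI
 * config space is then restored and the function waits for the ASIC to come
 * back out of reset before returning.
 * Returns 0 on success, negative error code on failure.
 */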
4639 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4640 {
4641         u32 i;
4642         int ret = 0;
4643
4644         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4645
4646         dev_info(adev->dev, "GPU mode1 reset\n");
4647
4648         /* disable BM */
4649         pci_clear_master(adev->pdev);
4650
4651         amdgpu_device_cache_pci_state(adev->pdev);
4652
4653         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4654                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4655                 ret = amdgpu_dpm_mode1_reset(adev);
4656         } else {
4657                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4658                 ret = psp_gpu_reset(adev);
4659         }
4660
4661         if (ret)
4662                 dev_err(adev->dev, "GPU mode1 reset failed\n");
4663
4664         amdgpu_device_load_pci_state(adev->pdev);
4665
4666         /* wait for asic to come out of reset */
4667         for (i = 0; i < adev->usec_timeout; i++) {
4668                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4669
4670                 if (memsize != 0xffffffff)
4671                         break;
4672                 udelay(1);
4673         }
4674
4675         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4676         return ret;
4677 }
4678
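/**
 * amdgpu_device_pre_asic_reset - prepare the device for an ASIC reset
 *
 * @adev: amdgpu_device pointer
 * @reset_context: reset context describing the reset to be performed
 *
 * Stops the SR-IOV data exchange (for VFs), clears and force-completes the
 * hardware fences on every ring and raises the karma of the guilty job.  If
 * no dedicated reset handler claims the reset, on bare metal this decides
 * between a per-IP soft reset and a full reset, suspending the IP blocks
 * when a full reset is required.
 * Returns 0 on success, negative error code on failure.
 */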
4679 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4680                                  struct amdgpu_reset_context *reset_context)
4681 {
4682         int i, r = 0;
4683         struct amdgpu_job *job = NULL;
4684         bool need_full_reset =
4685                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4686
4687         if (reset_context->reset_req_dev == adev)
4688                 job = reset_context->job;
4689
4690         if (amdgpu_sriov_vf(adev)) {
4691                 /* stop the data exchange thread */
4692                 amdgpu_virt_fini_data_exchange(adev);
4693         }
4694
4695         amdgpu_fence_driver_isr_toggle(adev, true);
4696
4697         /* Clear the job fences and force completion on every ring */
4698         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4699                 struct amdgpu_ring *ring = adev->rings[i];
4700
4701                 if (!ring || !ring->sched.thread)
4702                         continue;
4703
4704                 /* Clear the job fences from the fence driver so that
4705                  * force_completion only signals the NULL and VM flush fences left in it. */
4706                 amdgpu_fence_driver_clear_job_fences(ring);
4707
4708                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4709                 amdgpu_fence_driver_force_completion(ring);
4710         }
4711
4712         amdgpu_fence_driver_isr_toggle(adev, false);
4713
4714         if (job && job->vm)
4715                 drm_sched_increase_karma(&job->base);
4716
4717         r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4718         /* If reset handler not implemented, continue; otherwise return */
4719         if (r == -ENOSYS)
4720                 r = 0;
4721         else
4722                 return r;
4723
4724         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4725         if (!amdgpu_sriov_vf(adev)) {
4726
4727                 if (!need_full_reset)
4728                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4729
4730                 if (!need_full_reset && amdgpu_gpu_recovery &&
4731                     amdgpu_device_ip_check_soft_reset(adev)) {
4732                         amdgpu_device_ip_pre_soft_reset(adev);
4733                         r = amdgpu_device_ip_soft_reset(adev);
4734                         amdgpu_device_ip_post_soft_reset(adev);
4735                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4736                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4737                                 need_full_reset = true;
4738                         }
4739                 }
4740
4741                 if (need_full_reset)
4742                         r = amdgpu_device_ip_suspend(adev);
4743                 if (need_full_reset)
4744                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4745                 else
4746                         clear_bit(AMDGPU_NEED_FULL_RESET,
4747                                   &reset_context->flags);
4748         }
4749
4750         return r;
4751 }
4752
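/* Snapshot the registers in the reset dump list so their values can be
 * reported in the reset trace and the devcoredump.
 */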
4753 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4754 {
4755         int i;
4756
4757         lockdep_assert_held(&adev->reset_domain->sem);
4758
4759         for (i = 0; i < adev->num_regs; i++) {
4760                 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4761                 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4762                                              adev->reset_dump_reg_value[i]);
4763         }
4764
4765         return 0;
4766 }
4767
4768 #ifdef CONFIG_DEV_COREDUMP
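/* devcoredump read callback: format the captured reset information for userspace */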
4769 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4770                 size_t count, void *data, size_t datalen)
4771 {
4772         struct drm_printer p;
4773         struct amdgpu_device *adev = data;
4774         struct drm_print_iterator iter;
4775         int i;
4776
4777         iter.data = buffer;
4778         iter.offset = 0;
4779         iter.start = offset;
4780         iter.remain = count;
4781
4782         p = drm_coredump_printer(&iter);
4783
4784         drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4785         drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4786         drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4787         drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4788         if (adev->reset_task_info.pid)
4789                 drm_printf(&p, "process_name: %s PID: %d\n",
4790                            adev->reset_task_info.process_name,
4791                            adev->reset_task_info.pid);
4792
4793         if (adev->reset_vram_lost)
4794                 drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4795         if (adev->num_regs) {
4796                 drm_printf(&p, "AMDGPU register dumps:\nOffset:     Value:\n");
4797
4798                 for (i = 0; i < adev->num_regs; i++)
4799                         drm_printf(&p, "0x%08x: 0x%08x\n",
4800                                    adev->reset_dump_reg_list[i],
4801                                    adev->reset_dump_reg_value[i]);
4802         }
4803
4804         return count - iter.remain;
4805 }
4806
4807 static void amdgpu_devcoredump_free(void *data)
4808 {
4809 }
4810
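/* Register a device coredump so userspace can retrieve the reset context */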
4811 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4812 {
4813         struct drm_device *dev = adev_to_drm(adev);
4814
4815         ktime_get_ts64(&adev->reset_time);
4816         dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
4817                       amdgpu_devcoredump_read, amdgpu_devcoredump_free);
4818 }
4819 #endif
4820
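/**
 * amdgpu_do_asic_reset - reset and re-initialize the ASIC(s)
 *
 * @device_list_handle: list of devices to reset (a single device or all
 *                      the nodes of an XGMI hive)
 * @reset_context: reset context describing the reset to be performed
 *
 * Dumps the reset registers and tries a dedicated reset handler first.  If
 * none is implemented, falls back to the default flow: perform the full
 * ASIC resets when needed (in parallel for XGMI hives), re-post the cards,
 * resume the IP blocks, reload firmware, recover VRAM contents and run the
 * IB tests.
 * Returns 0 on success, negative error code on failure.
 */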
4821 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4822                          struct amdgpu_reset_context *reset_context)
4823 {
4824         struct amdgpu_device *tmp_adev = NULL;
4825         bool need_full_reset, skip_hw_reset, vram_lost = false;
4826         int r = 0;
4827         bool gpu_reset_for_dev_remove = 0;
4828
4829         /* Try reset handler method first */
4830         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4831                                     reset_list);
4832         amdgpu_reset_reg_dumps(tmp_adev);
4833
4834         reset_context->reset_device_list = device_list_handle;
4835         r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4836         /* If reset handler not implemented, continue; otherwise return */
4837         if (r == -ENOSYS)
4838                 r = 0;
4839         else
4840                 return r;
4841
4842         /* Reset handler not implemented, use the default method */
4843         need_full_reset =
4844                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4845         skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4846
4847         gpu_reset_for_dev_remove =
4848                 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
4849                         test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4850
4851         /*
4852          * ASIC reset has to be done on all XGMI hive nodes ASAP
4853          * to allow proper link negotiation in FW (within 1 sec)
4854          */
4855         if (!skip_hw_reset && need_full_reset) {
4856                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4857                         /* For XGMI run all resets in parallel to speed up the process */
4858                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4859                                 tmp_adev->gmc.xgmi.pending_reset = false;
4860                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4861                                         r = -EALREADY;
4862                         } else
4863                                 r = amdgpu_asic_reset(tmp_adev);
4864
4865                         if (r) {
4866                                 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4867                                          r, adev_to_drm(tmp_adev)->unique);
4868                                 break;
4869                         }
4870                 }
4871
4872                 /* For XGMI wait for all resets to complete before proceeding */
4873                 if (!r) {
4874                         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4875                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4876                                         flush_work(&tmp_adev->xgmi_reset_work);
4877                                         r = tmp_adev->asic_reset_res;
4878                                         if (r)
4879                                                 break;
4880                                 }
4881                         }
4882                 }
4883         }
4884
4885         if (!r && amdgpu_ras_intr_triggered()) {
4886                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4887                         if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4888                             tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4889                                 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4890                 }
4891
4892                 amdgpu_ras_intr_cleared();
4893         }
4894
4895         /* Since the mode1 reset affects the base IP blocks, the
4896          * phase1 IP blocks need to be resumed. Otherwise there
4897          * will be a BIOS signature error and the PSP bootloader
4898          * can't load the kdb on the next driver load.
4899          */
4900         if (gpu_reset_for_dev_remove) {
4901                 list_for_each_entry(tmp_adev, device_list_handle, reset_list)
4902                         amdgpu_device_ip_resume_phase1(tmp_adev);
4903
4904                 goto end;
4905         }
4906
4907         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4908                 if (need_full_reset) {
4909                         /* post card */
4910                         r = amdgpu_device_asic_init(tmp_adev);
4911                         if (r) {
4912                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
4913                         } else {
4914                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4915                                 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4916                                 if (r)
4917                                         goto out;
4918
4919                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4920                                 if (r)
4921                                         goto out;
4922
4923                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4924 #ifdef CONFIG_DEV_COREDUMP
4925                                 tmp_adev->reset_vram_lost = vram_lost;
4926                                 memset(&tmp_adev->reset_task_info, 0,
4927                                                 sizeof(tmp_adev->reset_task_info));
4928                                 if (reset_context->job && reset_context->job->vm)
4929                                         tmp_adev->reset_task_info =
4930                                                 reset_context->job->vm->task_info;
4931                                 amdgpu_reset_capture_coredumpm(tmp_adev);
4932 #endif
4933                                 if (vram_lost) {
4934                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
4935                                         amdgpu_inc_vram_lost(tmp_adev);
4936                                 }
4937
4938                                 r = amdgpu_device_fw_loading(tmp_adev);
4939                                 if (r)
4940                                         return r;
4941
4942                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4943                                 if (r)
4944                                         goto out;
4945
4946                                 if (vram_lost)
4947                                         amdgpu_device_fill_reset_magic(tmp_adev);
4948
4949                                 /*
4950                                  * Add this ASIC back as tracked since the
4951                                  * reset has already completed successfully.
4952                                  */
4953                                 amdgpu_register_gpu_instance(tmp_adev);
4954
4955                                 if (!reset_context->hive &&
4956                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4957                                         amdgpu_xgmi_add_device(tmp_adev);
4958
4959                                 r = amdgpu_device_ip_late_init(tmp_adev);
4960                                 if (r)
4961                                         goto out;
4962
4963                                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4964
4965                                 /*
4966                                  * The GPU enters a bad state once the number
4967                                  * of faulty pages reported by ECC reaches the
4968                                  * threshold, and RAS recovery is scheduled next.
4969                                  * Check here and abort the recovery if the bad
4970                                  * page threshold has indeed been exceeded, and
4971                                  * remind the user to either retire this GPU or
4972                                  * set a bigger bad_page_threshold value the next
4973                                  * time the driver is probed.
4974                                  */
4975                                 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4976                                         /* must succeed. */
4977                                         amdgpu_ras_resume(tmp_adev);
4978                                 } else {
4979                                         r = -EINVAL;
4980                                         goto out;
4981                                 }
4982
4983                                 /* Update PSP FW topology after reset */
4984                                 if (reset_context->hive &&
4985                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4986                                         r = amdgpu_xgmi_update_topology(
4987                                                 reset_context->hive, tmp_adev);
4988                         }
4989                 }
4990
4991 out:
4992                 if (!r) {
4993                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4994                         r = amdgpu_ib_ring_tests(tmp_adev);
4995                         if (r) {
4996                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4997                                 need_full_reset = true;
4998                                 r = -EAGAIN;
4999                                 goto end;
5000                         }
5001                 }
5002
5003                 if (!r)
5004                         r = amdgpu_device_recover_vram(tmp_adev);
5005                 else
5006                         tmp_adev->asic_reset_res = r;
5007         }
5008
5009 end:
5010         if (need_full_reset)
5011                 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5012         else
5013                 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5014         return r;
5015 }
5016
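/* Record the MP1 (SMU) state matching the reset method about to be used */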
5017 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5018 {
5019
5020         switch (amdgpu_asic_reset_method(adev)) {
5021         case AMD_RESET_METHOD_MODE1:
5022                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5023                 break;
5024         case AMD_RESET_METHOD_MODE2:
5025                 adev->mp1_state = PP_MP1_STATE_RESET;
5026                 break;
5027         default:
5028                 adev->mp1_state = PP_MP1_STATE_NONE;
5029                 break;
5030         }
5031 }
5032
5033 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5034 {
5035         amdgpu_vf_error_trans_all(adev);
5036         adev->mp1_state = PP_MP1_STATE_NONE;
5037 }
5038
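/* Re-enable runtime PM for the GPU's audio function (PCI function 1) and
 * resume it once the reset has completed.
 */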
5039 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5040 {
5041         struct pci_dev *p = NULL;
5042
5043         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5044                         adev->pdev->bus->number, 1);
5045         if (p) {
5046                 pm_runtime_enable(&(p->dev));
5047                 pm_runtime_resume(&(p->dev));
5048         }
5049
5050         pci_dev_put(p);
5051 }
5052
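/* Runtime-suspend the GPU's audio function (PCI function 1) and keep it
 * suspended for the duration of the reset.
 */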
5053 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5054 {
5055         enum amd_reset_method reset_method;
5056         struct pci_dev *p = NULL;
5057         u64 expires;
5058
5059         /*
5060          * For now, only BACO and mode1 reset are confirmed to
5061          * suffer from the audio issue if audio isn't properly suspended.
5062          */
5063         reset_method = amdgpu_asic_reset_method(adev);
5064         if ((reset_method != AMD_RESET_METHOD_BACO) &&
5065              (reset_method != AMD_RESET_METHOD_MODE1))
5066                 return -EINVAL;
5067
5068         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5069                         adev->pdev->bus->number, 1);
5070         if (!p)
5071                 return -ENODEV;
5072
5073         expires = pm_runtime_autosuspend_expiration(&(p->dev));
5074         if (!expires)
5075                 /*
5076                  * If we cannot get the audio device's autosuspend delay,
5077                  * use a fixed 4 second interval. Since 3 seconds is the
5078                  * audio controller's default autosuspend delay, 4 seconds
5079                  * is guaranteed to cover it.
5080                  */
5081                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5082
5083         while (!pm_runtime_status_suspended(&(p->dev))) {
5084                 if (!pm_runtime_suspend(&(p->dev)))
5085                         break;
5086
5087                 if (expires < ktime_get_mono_fast_ns()) {
5088                         dev_warn(adev->dev, "failed to suspend display audio\n");
5089                         pci_dev_put(p);
5090                         /* TODO: abort the succeeding gpu reset? */
5091                         return -ETIMEDOUT;
5092                 }
5093         }
5094
5095         pm_runtime_disable(&(p->dev));
5096
5097         pci_dev_put(p);
5098         return 0;
5099 }
5100
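/* Cancel any pending reset work (driver reset work, KFD, SR-IOV FLR,
 * RAS recovery) that has not started executing yet.
 */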
5101 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5102 {
5103         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5104
5105 #if defined(CONFIG_DEBUG_FS)
5106         if (!amdgpu_sriov_vf(adev))
5107                 cancel_work(&adev->reset_work);
5108 #endif
5109
5110         if (adev->kfd.dev)
5111                 cancel_work(&adev->kfd.reset_work);
5112
5113         if (amdgpu_sriov_vf(adev))
5114                 cancel_work(&adev->virt.flr_work);
5115
5116         if (con && adev->ras_enabled)
5117                 cancel_work(&con->recovery_work);
5118
5119 }
5120
5121 /**
5122  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5123  *
5124  * @adev: amdgpu_device pointer
5125  * @job: the job which triggered the hang
 * @reset_context: reset context describing the reset to be performed
5126  *
5127  * Attempt to reset the GPU if it has hung (all ASICs).
5128  * Attempt a soft reset or a full reset and reinitialize the ASIC.
5129  * Returns 0 on success or a negative error code on failure.
5130  */
5131
5132 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5133                               struct amdgpu_job *job,
5134                               struct amdgpu_reset_context *reset_context)
5135 {
5136         struct list_head device_list, *device_list_handle =  NULL;
5137         bool job_signaled = false;
5138         struct amdgpu_hive_info *hive = NULL;
5139         struct amdgpu_device *tmp_adev = NULL;
5140         int i, r = 0;
5141         bool need_emergency_restart = false;
5142         bool audio_suspended = false;
5143         bool gpu_reset_for_dev_remove = false;
5144
5145         gpu_reset_for_dev_remove =
5146                         test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5147                                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5148
5149         /*
5150          * Special case: RAS triggered and full reset isn't supported
5151          */
5152         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5153
5154         /*
5155          * Sync in-memory data to disk so that after the reboot
5156          * the user can read the logs and see why the system rebooted.
5157          */
5158         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5159                 DRM_WARN("Emergency reboot.");
5160
5161                 ksys_sync_helper();
5162                 emergency_restart();
5163         }
5164
5165         dev_info(adev->dev, "GPU %s begin!\n",
5166                 need_emergency_restart ? "jobs stop":"reset");
5167
5168         if (!amdgpu_sriov_vf(adev))
5169                 hive = amdgpu_get_xgmi_hive(adev);
5170         if (hive)
5171                 mutex_lock(&hive->hive_lock);
5172
5173         reset_context->job = job;
5174         reset_context->hive = hive;
5175         /*
5176          * Build list of devices to reset.
5177          * In case we are in XGMI hive mode, re-sort the device list
5178          * so that adev is in the first position.
5179          */
5180         INIT_LIST_HEAD(&device_list);
5181         if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5182                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5183                         list_add_tail(&tmp_adev->reset_list, &device_list);
5184                         if (gpu_reset_for_dev_remove && adev->shutdown)
5185                                 tmp_adev->shutdown = true;
5186                 }
5187                 if (!list_is_first(&adev->reset_list, &device_list))
5188                         list_rotate_to_front(&adev->reset_list, &device_list);
5189                 device_list_handle = &device_list;
5190         } else {
5191                 list_add_tail(&adev->reset_list, &device_list);
5192                 device_list_handle = &device_list;
5193         }
5194
5195         /* We only need to lock the reset domain once, for both XGMI and single-device resets */
5196         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5197                                     reset_list);
5198         amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5199
5200         /* block all schedulers and reset given job's ring */
5201         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5202
5203                 amdgpu_device_set_mp1_state(tmp_adev);
5204
5205                 /*
5206                  * Try to put the audio codec into the suspend state
5207                  * before the GPU reset starts.
5208                  *
5209                  * The power domain of the graphics device is shared
5210                  * with the AZ (audio) power domain. Without this, we
5211                  * may change the audio hardware from behind the audio
5212                  * driver's back, which would trigger audio codec
5213                  * errors.
5214                  */
5215                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5216                         audio_suspended = true;
5217
5218                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5219
5220                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5221
5222                 if (!amdgpu_sriov_vf(tmp_adev))
5223                         amdgpu_amdkfd_pre_reset(tmp_adev);
5224
5225                 /*
5226                  * Mark the ASICs to be reset as untracked first,
5227                  * then add them back after the reset completes.
5228                  */
5229                 amdgpu_unregister_gpu_instance(tmp_adev);
5230
5231                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5232
5233                 /* disable ras on ALL IPs */
5234                 if (!need_emergency_restart &&
5235                       amdgpu_device_ip_need_full_reset(tmp_adev))
5236                         amdgpu_ras_suspend(tmp_adev);
5237
5238                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5239                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5240
5241                         if (!ring || !ring->sched.thread)
5242                                 continue;
5243
5244                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5245
5246                         if (need_emergency_restart)
5247                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5248                 }
5249                 atomic_inc(&tmp_adev->gpu_reset_counter);
5250         }
5251
5252         if (need_emergency_restart)
5253                 goto skip_sched_resume;
5254
5255         /*
5256          * Must check whether the guilty job has already signaled here,
5257          * since after this point all old HW fences are force signaled.
5258          *
5259          * job->base holds a reference to the parent fence.
5260          */
5261         if (job && dma_fence_is_signaled(&job->hw_fence)) {
5262                 job_signaled = true;
5263                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5264                 goto skip_hw_reset;
5265         }
5266
5267 retry:  /* Do the pre-ASIC-reset steps for every adev in the list (whole XGMI hive). */
5268         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5269                 if (gpu_reset_for_dev_remove) {
5270                         /* Workaround for ASICs that need to disable the SMC first */
5271                         amdgpu_device_smu_fini_early(tmp_adev);
5272                 }
5273                 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5274                 /*TODO Should we stop ?*/
5275                 if (r) {
5276                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5277                                   r, adev_to_drm(tmp_adev)->unique);
5278                         tmp_adev->asic_reset_res = r;
5279                 }
5280
5281                 /*
5282                  * Drop all pending non-scheduler resets. Scheduler resets
5283                  * were already dropped during drm_sched_stop.
5284                  */
5285                 amdgpu_device_stop_pending_resets(tmp_adev);
5286         }
5287
5288         /* Actual ASIC resets, if needed. */
5289         /* The host driver will handle the XGMI hive reset for SR-IOV. */
5290         if (amdgpu_sriov_vf(adev)) {
5291                 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5292                 if (r)
5293                         adev->asic_reset_res = r;
5294
5295                 /* Aldebaran supports RAS in SR-IOV, so RAS needs to be resumed during reset */
5296                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
5297                         amdgpu_ras_resume(adev);
5298         } else {
5299                 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5300                 if (r && r == -EAGAIN)
5301                         goto retry;
5302
5303                 if (!r && gpu_reset_for_dev_remove)
5304                         goto recover_end;
5305         }
5306
5307 skip_hw_reset:
5308
5309         /* Post ASIC reset for all devs. */
5310         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5311
5312                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5313                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5314
5315                         if (!ring || !ring->sched.thread)
5316                                 continue;
5317
5318                         drm_sched_start(&ring->sched, true);
5319                 }
5320
5321                 if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
5322                         amdgpu_mes_self_test(tmp_adev);
5323
5324                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5325                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5326                 }
5327
5328                 if (tmp_adev->asic_reset_res)
5329                         r = tmp_adev->asic_reset_res;
5330
5331                 tmp_adev->asic_reset_res = 0;
5332
5333                 if (r) {
5334                         /* bad news, how to tell it to userspace ? */
5335                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5336                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5337                 } else {
5338                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5339                         if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5340                                 DRM_WARN("smart shift update failed\n");
5341                 }
5342         }
5343
5344 skip_sched_resume:
5345         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5346                 /* unlock kfd: SRIOV would do it separately */
5347                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5348                         amdgpu_amdkfd_post_reset(tmp_adev);
5349
5350                 /* kfd_post_reset will do nothing if the kfd device is not initialized,
5351                  * so bring up kfd here if it wasn't initialized before.
5352                  */
5353                 if (!adev->kfd.init_complete)
5354                         amdgpu_amdkfd_device_init(adev);
5355
5356                 if (audio_suspended)
5357                         amdgpu_device_resume_display_audio(tmp_adev);
5358
5359                 amdgpu_device_unset_mp1_state(tmp_adev);
5360
5361                 amdgpu_ras_set_error_query_ready(tmp_adev, true);
5362         }
5363
5364 recover_end:
5365         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5366                                             reset_list);
5367         amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5368
5369         if (hive) {
5370                 mutex_unlock(&hive->hive_lock);
5371                 amdgpu_put_xgmi_hive(hive);
5372         }
5373
5374         if (r)
5375                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5376
5377         atomic_set(&adev->reset_domain->reset_res, r);
5378         return r;
5379 }
5380
5381 /**
5382  * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
5383  *
5384  * @adev: amdgpu_device pointer
5385  *
5386  * Fetches and stores in the driver the PCIe capabilities (gen speed
5387  * and lanes) of the slot the device is in. Handles APUs and
5388  * virtualized environments where PCIE config space may not be available.
5389  */
5390 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5391 {
5392         struct pci_dev *pdev;
5393         enum pci_bus_speed speed_cap, platform_speed_cap;
5394         enum pcie_link_width platform_link_width;
5395
5396         if (amdgpu_pcie_gen_cap)
5397                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5398
5399         if (amdgpu_pcie_lane_cap)
5400                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5401
5402         /* covers APUs as well */
5403         if (pci_is_root_bus(adev->pdev->bus)) {
5404                 if (adev->pm.pcie_gen_mask == 0)
5405                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5406                 if (adev->pm.pcie_mlw_mask == 0)
5407                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5408                 return;
5409         }
5410
5411         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5412                 return;
5413
5414         pcie_bandwidth_available(adev->pdev, NULL,
5415                                  &platform_speed_cap, &platform_link_width);
5416
5417         if (adev->pm.pcie_gen_mask == 0) {
5418                 /* asic caps */
5419                 pdev = adev->pdev;
5420                 speed_cap = pcie_get_speed_cap(pdev);
5421                 if (speed_cap == PCI_SPEED_UNKNOWN) {
5422                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5423                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5424                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5425                 } else {
5426                         if (speed_cap == PCIE_SPEED_32_0GT)
5427                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5428                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5429                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5430                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5431                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5432                         else if (speed_cap == PCIE_SPEED_16_0GT)
5433                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5434                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5435                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5436                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5437                         else if (speed_cap == PCIE_SPEED_8_0GT)
5438                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5439                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5440                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5441                         else if (speed_cap == PCIE_SPEED_5_0GT)
5442                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5443                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5444                         else
5445                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5446                 }
5447                 /* platform caps */
5448                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5449                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5450                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5451                 } else {
5452                         if (platform_speed_cap == PCIE_SPEED_32_0GT)
5453                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5454                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5455                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5456                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5457                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5458                         else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5459                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5460                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5461                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5462                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5463                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5464                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5465                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5466                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5467                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5468                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5469                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5470                         else
5471                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5472
5473                 }
5474         }
5475         if (adev->pm.pcie_mlw_mask == 0) {
5476                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5477                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5478                 } else {
5479                         switch (platform_link_width) {
5480                         case PCIE_LNK_X32:
5481                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5482                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5483                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5484                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5485                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5486                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5487                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5488                                 break;
5489                         case PCIE_LNK_X16:
5490                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5491                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5492                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5493                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5494                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5495                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5496                                 break;
5497                         case PCIE_LNK_X12:
5498                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5499                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5500                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5501                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5502                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5503                                 break;
5504                         case PCIE_LNK_X8:
5505                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5506                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5507                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5508                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5509                                 break;
5510                         case PCIE_LNK_X4:
5511                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5512                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5513                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5514                                 break;
5515                         case PCIE_LNK_X2:
5516                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5517                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5518                                 break;
5519                         case PCIE_LNK_X1:
5520                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5521                                 break;
5522                         default:
5523                                 break;
5524                         }
5525                 }
5526         }
5527 }
5528
5529 /**
5530  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5531  *
5532  * @adev: amdgpu_device pointer
5533  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5534  *
5535  * Return true if @peer_adev can access (DMA) @adev through the PCIe
5536  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5537  * @peer_adev.
5538  */
5539 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5540                                       struct amdgpu_device *peer_adev)
5541 {
5542 #ifdef CONFIG_HSA_AMD_P2P
5543         uint64_t address_mask = peer_adev->dev->dma_mask ?
5544                 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5545         resource_size_t aper_limit =
5546                 adev->gmc.aper_base + adev->gmc.aper_size - 1;
5547         bool p2p_access =
5548                 !adev->gmc.xgmi.connected_to_cpu &&
5549                 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5550
5551         return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5552                 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5553                 !(adev->gmc.aper_base & address_mask ||
5554                   aper_limit & address_mask));
5555 #else
5556         return false;
5557 #endif
5558 }
5559
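/* Enter BACO (Bus Active, Chip Off): disable the RAS doorbell interrupt if
 * needed and ask the DPM code to power down the chip.
 */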
5560 int amdgpu_device_baco_enter(struct drm_device *dev)
5561 {
5562         struct amdgpu_device *adev = drm_to_adev(dev);
5563         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5564
5565         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5566                 return -ENOTSUPP;
5567
5568         if (ras && adev->ras_enabled &&
5569             adev->nbio.funcs->enable_doorbell_interrupt)
5570                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5571
5572         return amdgpu_dpm_baco_enter(adev);
5573 }
5574
5575 int amdgpu_device_baco_exit(struct drm_device *dev)
5576 {
5577         struct amdgpu_device *adev = drm_to_adev(dev);
5578         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5579         int ret = 0;
5580
5581         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5582                 return -ENOTSUPP;
5583
5584         ret = amdgpu_dpm_baco_exit(adev);
5585         if (ret)
5586                 return ret;
5587
5588         if (ras && adev->ras_enabled &&
5589             adev->nbio.funcs->enable_doorbell_interrupt)
5590                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5591
5592         if (amdgpu_passthrough(adev) &&
5593             adev->nbio.funcs->clear_doorbell_interrupt)
5594                 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5595
5596         return 0;
5597 }
5598
5599 /**
5600  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5601  * @pdev: PCI device struct
5602  * @state: PCI channel state
5603  *
5604  * Description: Called when a PCI error is detected.
5605  *
5606  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5607  */
5608 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5609 {
5610         struct drm_device *dev = pci_get_drvdata(pdev);
5611         struct amdgpu_device *adev = drm_to_adev(dev);
5612         int i;
5613
5614         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5615
5616         if (adev->gmc.xgmi.num_physical_nodes > 1) {
5617                 DRM_WARN("No support for XGMI hive yet...");
5618                 return PCI_ERS_RESULT_DISCONNECT;
5619         }
5620
5621         adev->pci_channel_state = state;
5622
5623         switch (state) {
5624         case pci_channel_io_normal:
5625                 return PCI_ERS_RESULT_CAN_RECOVER;
5626         /* Fatal error, prepare for slot reset */
5627         case pci_channel_io_frozen:
5628                 /*
5629                  * Locking adev->reset_domain->sem will prevent any external access
5630                  * to the GPU during PCI error recovery
5631                  */
5632                 amdgpu_device_lock_reset_domain(adev->reset_domain);
5633                 amdgpu_device_set_mp1_state(adev);
5634
5635                 /*
5636                  * Block any work scheduling as we do for regular GPU reset
5637                  * for the duration of the recovery
5638                  */
5639                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5640                         struct amdgpu_ring *ring = adev->rings[i];
5641
5642                         if (!ring || !ring->sched.thread)
5643                                 continue;
5644
5645                         drm_sched_stop(&ring->sched, NULL);
5646                 }
5647                 atomic_inc(&adev->gpu_reset_counter);
5648                 return PCI_ERS_RESULT_NEED_RESET;
5649         case pci_channel_io_perm_failure:
5650                 /* Permanent error, prepare for device removal */
5651                 return PCI_ERS_RESULT_DISCONNECT;
5652         }
5653
5654         return PCI_ERS_RESULT_NEED_RESET;
5655 }
5656
5657 /**
5658  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5659  * @pdev: pointer to PCI device
5660  */
5661 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5662 {
5663
5664         DRM_INFO("PCI error: mmio enabled callback!!\n");
5665
5666         /* TODO - dump whatever for debugging purposes */
5667
5668         /* This is called only if amdgpu_pci_error_detected returns
5669          * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5670          * works, no need to reset slot.
5671          */
5672
5673         return PCI_ERS_RESULT_RECOVERED;
5674 }
5675
5676 /**
5677  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5678  * @pdev: PCI device struct
5679  *
5680  * Description: This routine is called by the PCI error recovery
5681  * code after the PCI slot has been reset, just before we
5682  * should resume normal operations.
5683  */
5684 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5685 {
5686         struct drm_device *dev = pci_get_drvdata(pdev);
5687         struct amdgpu_device *adev = drm_to_adev(dev);
5688         int r, i;
5689         struct amdgpu_reset_context reset_context;
5690         u32 memsize;
5691         struct list_head device_list;
5692
5693         DRM_INFO("PCI error: slot reset callback!!\n");
5694
5695         memset(&reset_context, 0, sizeof(reset_context));
5696
5697         INIT_LIST_HEAD(&device_list);
5698         list_add_tail(&adev->reset_list, &device_list);
5699
5700         /* wait for asic to come out of reset */
5701         msleep(500);
5702
5703         /* Restore the PCI config space */
5704         amdgpu_device_load_pci_state(pdev);
5705
5706         /* confirm the ASIC came out of reset */
5707         for (i = 0; i < adev->usec_timeout; i++) {
5708                 memsize = amdgpu_asic_get_config_memsize(adev);
5709
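                     /* A value of all ones means the read did not reach the
                      * device, i.e. the ASIC has not come out of reset yet.
                      */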
5710                 if (memsize != 0xffffffff)
5711                         break;
5712                 udelay(1);
5713         }
5714         if (memsize == 0xffffffff) {
5715                 r = -ETIME;
5716                 goto out;
5717         }
5718
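             /* The slot reset has already reset the hardware, so skip the
              * driver's own ASIC reset and only re-initialize the IP blocks.
              */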
5719         reset_context.method = AMD_RESET_METHOD_NONE;
5720         reset_context.reset_req_dev = adev;
5721         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5722         set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5723
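             /* Register access may not be reliable yet at this point, so keep
              * hardware access blocked while the IP blocks are prepared for
              * the reset.
              */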
5724         adev->no_hw_access = true;
5725         r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5726         adev->no_hw_access = false;
5727         if (r)
5728                 goto out;
5729
5730         r = amdgpu_do_asic_reset(&device_list, &reset_context);
5731
5732 out:
5733         if (!r) {
5734                 if (amdgpu_device_cache_pci_state(adev->pdev))
5735                         pci_restore_state(adev->pdev);
5736
5737                 DRM_INFO("PCIe error recovery succeeded\n");
5738         } else {
5739                 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5740                 amdgpu_device_unset_mp1_state(adev);
5741                 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5742         }
5743
5744         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5745 }
5746
5747 /**
5748  * amdgpu_pci_resume() - resume normal ops after PCI reset
5749  * @pdev: pointer to PCI device
5750  *
5751  * Called when the error recovery driver tells us that it's
5752  * OK to resume normal operation.
5753  */
5754 void amdgpu_pci_resume(struct pci_dev *pdev)
5755 {
5756         struct drm_device *dev = pci_get_drvdata(pdev);
5757         struct amdgpu_device *adev = drm_to_adev(dev);
5758         int i;
5759
5760
5761         DRM_INFO("PCI error: resume callback!!\n");
5762
5763         /* Only continue execution for the case of pci_channel_io_frozen */
5764         if (adev->pci_channel_state != pci_channel_io_frozen)
5765                 return;
5766
5767         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5768                 struct amdgpu_ring *ring = adev->rings[i];
5769
5770                 if (!ring || !ring->sched.thread)
5771                         continue;
5772
5773                 drm_sched_start(&ring->sched, true);
5774         }
5775
5776         amdgpu_device_unset_mp1_state(adev);
5777         amdgpu_device_unlock_reset_domain(adev->reset_domain);
5778 }
5779
5780 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5781 {
5782         struct drm_device *dev = pci_get_drvdata(pdev);
5783         struct amdgpu_device *adev = drm_to_adev(dev);
5784         int r;
5785
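             /* Snapshot the PCI config space so it can be restored by
              * amdgpu_device_load_pci_state() after a GPU reset or PCI error
              * recovery.
              */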
5786         r = pci_save_state(pdev);
5787         if (!r) {
5788                 kfree(adev->pci_state);
5789
5790                 adev->pci_state = pci_store_saved_state(pdev);
5791
5792                 if (!adev->pci_state) {
5793                         DRM_ERROR("Failed to store PCI saved state");
5794                         return false;
5795                 }
5796         } else {
5797                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5798                 return false;
5799         }
5800
5801         return true;
5802 }
5803
5804 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5805 {
5806         struct drm_device *dev = pci_get_drvdata(pdev);
5807         struct amdgpu_device *adev = drm_to_adev(dev);
5808         int r;
5809
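             /* Re-apply the config space snapshot taken by
              * amdgpu_device_cache_pci_state().
              */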
5810         if (!adev->pci_state)
5811                 return false;
5812
5813         r = pci_load_saved_state(pdev, adev->pci_state);
5814
5815         if (!r) {
5816                 pci_restore_state(pdev);
5817         } else {
5818                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5819                 return false;
5820         }
5821
5822         return true;
5823 }
5824
5825 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5826                 struct amdgpu_ring *ring)
5827 {
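             /* No HDP flush is needed when the CPU does not reach VRAM over
              * PCIe: APUs that are not passed through to a guest and GPUs
              * whose VRAM is connected to the CPU via XGMI.
              */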
5828 #ifdef CONFIG_X86_64
5829         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5830                 return;
5831 #endif
5832         if (adev->gmc.xgmi.connected_to_cpu)
5833                 return;
5834
5835         if (ring && ring->funcs->emit_hdp_flush)
5836                 amdgpu_ring_emit_hdp_flush(ring);
5837         else
5838                 amdgpu_asic_flush_hdp(adev, ring);
5839 }
5840
5841 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5842                 struct amdgpu_ring *ring)
5843 {
5844 #ifdef CONFIG_X86_64
5845         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5846                 return;
5847 #endif
5848         if (adev->gmc.xgmi.connected_to_cpu)
5849                 return;
5850
5851         amdgpu_asic_invalidate_hdp(adev, ring);
5852 }
5853
5854 int amdgpu_in_reset(struct amdgpu_device *adev)
5855 {
5856         return atomic_read(&adev->reset_domain->in_gpu_reset);
5857 }
5858
5859 /**
5860  * amdgpu_device_halt() - bring hardware to some kind of halt state
5861  *
5862  * @adev: amdgpu_device pointer
5863  *
5864  * Bring the hardware to some kind of halt state so that nothing can touch
5865  * it any more. This helps to preserve the error context when an error
5866  * occurs. Compared to a simple hang, the system stays stable, at least
5867  * enough for SSH access. It should then be straightforward to inspect the
5868  * hardware state and see what is going on. Implemented as follows:
5869  *
5870  * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc.),
5871  *    clears all CPU mappings to the device, disallows remappings through page faults
5872  * 2. amdgpu_irq_disable_all() disables all interrupts
5873  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5874  * 4. set adev->no_hw_access to avoid potential crashes after step 5
5875  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5876  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5877  *    flush any in flight DMA operations
5878  */
5879 void amdgpu_device_halt(struct amdgpu_device *adev)
5880 {
5881         struct pci_dev *pdev = adev->pdev;
5882         struct drm_device *ddev = adev_to_drm(adev);
5883
5884         drm_dev_unplug(ddev);
5885
5886         amdgpu_irq_disable_all(adev);
5887
5888         amdgpu_fence_driver_hw_fini(adev);
5889
5890         adev->no_hw_access = true;
5891
5892         amdgpu_device_unmap_mmio(adev);
5893
5894         pci_disable_device(pdev);
5895         pci_wait_for_pending_transaction(pdev);
5896 }
5897
5898 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5899                                 u32 reg)
5900 {
5901         unsigned long flags, address, data;
5902         u32 r;
5903
5904         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5905         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5906
5907         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
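             /* Indexed access: program the byte offset (reg * 4) into the
              * index register, read it back so the write has landed, then
              * read the data register.
              */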
5908         WREG32(address, reg * 4);
5909         (void)RREG32(address);
5910         r = RREG32(data);
5911         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5912         return r;
5913 }
5914
5915 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
5916                                 u32 reg, u32 v)
5917 {
5918         unsigned long flags, address, data;
5919
5920         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5921         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5922
5923         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
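             /* Same index/data scheme as the read path; the final read-back
              * of the data register flushes the posted write before the lock
              * is dropped.
              */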
5924         WREG32(address, reg * 4);
5925         (void)RREG32(address);
5926         WREG32(data, v);
5927         (void)RREG32(data);
5928         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5929 }
5930
5931 /**
5932  * amdgpu_device_switch_gang - switch to a new gang
5933  * @adev: amdgpu_device pointer
5934  * @gang: the gang to switch to
5935  *
5936  * Try to switch to a new gang.
5937  * Returns: NULL if we switched to the new gang, or a reference to the
5938  * current gang leader otherwise.
5939  */
5940 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
5941                                             struct dma_fence *gang)
5942 {
5943         struct dma_fence *old = NULL;
5944
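             /* Install @gang as the new leader only once the previous leader
              * has signaled; otherwise hand the old fence back to the caller.
              * The cmpxchg loop retries if another thread swapped the leader
              * in the meantime.
              */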
5945         do {
5946                 dma_fence_put(old);
5947                 rcu_read_lock();
5948                 old = dma_fence_get_rcu_safe(&adev->gang_submit);
5949                 rcu_read_unlock();
5950
5951                 if (old == gang)
5952                         break;
5953
5954                 if (!dma_fence_is_signaled(old))
5955                         return old;
5956
5957         } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
5958                          old, gang) != old);
5959
5960         dma_fence_put(old);
5961         return NULL;
5962 }
5963
5964 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
5965 {
5966         switch (adev->asic_type) {
5967 #ifdef CONFIG_DRM_AMDGPU_SI
5968         case CHIP_HAINAN:
5969 #endif
5970         case CHIP_TOPAZ:
5971                 /* chips with no display hardware */
5972                 return false;
5973 #ifdef CONFIG_DRM_AMDGPU_SI
5974         case CHIP_TAHITI:
5975         case CHIP_PITCAIRN:
5976         case CHIP_VERDE:
5977         case CHIP_OLAND:
5978 #endif
5979 #ifdef CONFIG_DRM_AMDGPU_CIK
5980         case CHIP_BONAIRE:
5981         case CHIP_HAWAII:
5982         case CHIP_KAVERI:
5983         case CHIP_KABINI:
5984         case CHIP_MULLINS:
5985 #endif
5986         case CHIP_TONGA:
5987         case CHIP_FIJI:
5988         case CHIP_POLARIS10:
5989         case CHIP_POLARIS11:
5990         case CHIP_POLARIS12:
5991         case CHIP_VEGAM:
5992         case CHIP_CARRIZO:
5993         case CHIP_STONEY:
5994                 /* chips with display hardware */
5995                 return true;
5996         default:
5997                 /* IP discovery: no display if DCE is absent or the DMU is harvested */
5998                 if (!adev->ip_versions[DCE_HWIP][0] ||
5999                     (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6000                         return false;
6001                 return true;
6002         }
6003 }