/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"ARCTURUS",
	"RENOIR",
	"ALDEBARAN",
	"NAVI10",
	"CYAN_SKILLFISH",
	"NAVI14",
	"NAVI12",
	"SIENNA_CICHLID",
	"NAVY_FLOUNDER",
	"VANGOGH",
	"DIMGREY_CAVEFISH",
	"BEIGE_GOBY",
	"YELLOW_CARP",
	"IP DISCOVERY",
	"LAST",
};

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as the sum of the NAKs generated and NAKs received.
 */

140
141 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
142                 struct device_attribute *attr, char *buf)
143 {
144         struct drm_device *ddev = dev_get_drvdata(dev);
145         struct amdgpu_device *adev = drm_to_adev(ddev);
146         uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
147
148         return sysfs_emit(buf, "%llu\n", cnt);
149 }
150
151 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
152                 amdgpu_device_get_pcie_replay_count, NULL);
153
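/*
 * Illustrative sketch (not driver code, never compiled): how a userspace
 * tool might consume the attribute above.  The sysfs path is an assumption
 * for a typical single-GPU system.
 */
#if 0	/* example only */
#include <stdio.h>

int main(void)
{
	unsigned long long replays;
	FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &replays) == 1)
		printf("PCIe replays: %llu\n", replays);
	fclose(f);
	return 0;
}
#endif
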
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * DOC: product_name
 *
 * The amdgpu driver provides a sysfs API for reporting the product name
 * for the device.
 * The file product_name is used for this and returns the product name
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_name(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s\n", adev->product_name);
}

static DEVICE_ATTR(product_name, S_IRUGO,
		amdgpu_device_get_product_name, NULL);

/**
 * DOC: product_number
 *
 * The amdgpu driver provides a sysfs API for reporting the part number
 * for the device.
 * The file product_number is used for this and returns the part number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_number(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s\n", adev->product_number);
}

static DEVICE_ATTR(product_number, S_IRUGO,
		amdgpu_device_get_product_number, NULL);

/**
 * DOC: serial_number
 *
 * The amdgpu driver provides a sysfs API for reporting the serial number
 * for the device.
 * The file serial_number is used for this and returns the serial number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_serial_number(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s\n", adev->serial);
}

static DEVICE_ATTR(serial_number, S_IRUGO,
		amdgpu_device_get_serial_number, NULL);

/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
		return true;
	return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev->has_pr3 ||
	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
		return true;
	return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise returns false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	return amdgpu_asic_supports_baco(adev);
}

/**
 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
 * Smart Shift support
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
	return (amdgpu_device_supports_boco(dev) &&
		amdgpu_acpi_is_power_shift_control_supported());
}

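/*
 * Illustrative sketch (an assumption, not driver code): how the power
 * helpers above could be combined to pick a runtime power-off strategy
 * for a device.
 */
#if 0	/* example only */
static const char *amdgpu_example_runpm_mode(struct drm_device *dev)
{
	if (amdgpu_device_supports_px(dev))
		return "ATPX";	/* dGPU power controlled via ATPX */
	if (amdgpu_device_supports_boco(dev))
		return "BOCO";	/* ACPI (PR3) power resources */
	if (amdgpu_device_supports_baco(dev))
		return "BACO";	/* bus active, chip off */
	return "none";
}
#endif
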
/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write)
{
	unsigned long flags;
	uint32_t hi = ~0, tmp = 0;
	uint32_t *data = buf;
	uint64_t last;
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
		tmp = pos >> 31;

		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		if (tmp != hi) {
			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
			hi = tmp;
		}
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *data++);
		else
			*data++ = RREG32_NO_KIQ(mmMM_DATA);
	}

	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	drm_dev_exit(idx);
}

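/*
 * Illustrative sketch (an assumption, not driver code): reading one dword
 * from VRAM offset 0 through the MM_INDEX/MM_DATA window above.
 */
#if 0	/* example only */
static u32 amdgpu_example_peek_vram(struct amdgpu_device *adev)
{
	u32 val = 0;

	/* pos and size must be dword aligned, see the BUG_ON() above */
	amdgpu_device_mm_access(adev, 0, &val, sizeof(val), false);
	return val;
}
#endif
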
/**
 * amdgpu_device_aper_access - access vram via the vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 *
 * Returns the number of bytes that have been transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
	void __iomem *addr;
	size_t count = 0;
	uint64_t last;

	if (!adev->mman.aper_base_kaddr)
		return 0;

	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		addr = adev->mman.aper_base_kaddr + pos;
		count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			mb();
			amdgpu_device_flush_hdp(adev, NULL);
		} else {
			amdgpu_device_invalidate_hdp(adev, NULL);
			mb();
			memcpy_fromio(buf, addr, count);
		}

	}

	return count;
#else
	return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write)
{
	size_t count;

	/* try using the VRAM aperture to access VRAM first */
	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
	size -= count;
	if (size) {
		/* use MM_INDEX/MM_DATA to access the rest of VRAM */
		pos += count;
		buf += count;
		amdgpu_device_mm_access(adev, pos, buf, size, write);
	}
}

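/*
 * Illustrative sketch (an assumption, not driver code): dumping the first
 * 256 bytes of VRAM with the combined helper above.
 */
#if 0	/* example only */
static void amdgpu_example_dump_vram(struct amdgpu_device *adev, void *buf)
{
	/* falls back to MM_INDEX/MM_DATA for anything outside the aperture */
	amdgpu_device_vram_access(adev, 0, buf, 256, false);
}
#endif
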
/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
	if (adev->no_hw_access)
		return true;

#ifdef CONFIG_LOCKDEP
	/*
	 * This is a bit complicated to understand, so worth a comment. What we assert
	 * here is that the GPU reset is not running on another thread in parallel.
	 *
	 * For this we trylock the read side of the reset semaphore, if that succeeds
	 * we know that the reset is not running in parallel.
	 *
	 * If the trylock fails we assert that we are either already holding the read
	 * side of the lock or are the reset thread itself and hold the write side of
	 * the lock.
	 */
	if (in_task()) {
		if (down_read_trylock(&adev->reset_sem))
			up_read(&adev->reset_sem);
		else
			lockdep_assert_held(&adev->reset_sem);
	}
#endif
	return false;
}

/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags)
{
	uint32_t ret;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_sem)) {
			ret = amdgpu_kiq_rreg(adev, reg);
			up_read(&adev->reset_sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

	return ret;
}

/*
 * MMIO register read with byte offset helper function
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}

/*
 * MMIO register write with byte offset helper function
 * @offset: byte offset from MMIO start
 * @value: the value to write to the register
 */
/**
 * amdgpu_mm_wreg8 - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_sem)) {
			amdgpu_kiq_wreg(adev, reg, v);
			up_read(&adev->reset_sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}

	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}

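/*
 * Illustrative sketch (an assumption, not driver code): a read-modify-write
 * through the two helpers above, bypassing the KIQ path the way the debugfs
 * code does.
 */
#if 0	/* example only */
static void amdgpu_example_rmw(struct amdgpu_device *adev,
			       u32 reg, u32 mask, u32 bits)
{
	u32 v = amdgpu_device_rreg(adev, reg, AMDGPU_REGS_NO_KIQ);

	v = (v & ~mask) | (bits & mask);
	amdgpu_device_wreg(adev, reg, v, AMDGPU_REGS_NO_KIQ);
}
#endif
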
/*
 * amdgpu_mm_wreg_mmio_rlc - write a register either with mmio or with the RLC path if in range
 *
 * This function is invoked only for debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (amdgpu_sriov_fullaccess(adev) &&
	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

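/*
 * Illustrative sketch (an assumption, not driver code): how a ring might
 * publish its write pointer through the doorbell helpers above.  The
 * doorbell_index and wptr parameters stand in for hypothetical ring state.
 */
#if 0	/* example only */
static void amdgpu_example_ring_doorbell(struct amdgpu_device *adev,
					 u32 doorbell_index, u64 wptr)
{
	/* VEGA10+ uses 64 bit doorbells, older asics use 32 bit ones */
	if (adev->asic_type >= CHIP_VEGA10)
		amdgpu_mm_wdoorbell64(adev, doorbell_index, wptr);
	else
		amdgpu_mm_wdoorbell(adev, doorbell_index, (u32)wptr);
}
#endif
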
/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 pcie_index, u32 pcie_data,
				u32 reg_addr)
{
	unsigned long flags;
	u32 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 pcie_index, u32 pcie_data,
				  u32 reg_addr)
{
	unsigned long flags;
	u64 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	r |= ((u64)readl(pcie_data_offset) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_wreg - write an indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 pcie_index, u32 pcie_data,
				 u32 reg_addr, u32 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 pcie_index, u32 pcie_data,
				   u32 reg_addr, u64 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

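/*
 * Illustrative sketch (an assumption, not driver code): an asic file could
 * wrap the helpers above into its pcie_rreg64/pcie_wreg64 callbacks.  The
 * index/data register offsets below are hypothetical placeholders.
 */
#if 0	/* example only */
#define EXAMPLE_PCIE_INDEX2	0x000e	/* hypothetical offset */
#define EXAMPLE_PCIE_DATA2	0x000f	/* hypothetical offset */

static u64 example_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	return amdgpu_device_indirect_rreg64(adev, EXAMPLE_PCIE_INDEX2,
					     EXAMPLE_PCIE_DATA2, reg);
}

static void example_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	amdgpu_device_indirect_wreg64(adev, EXAMPLE_PCIE_INDEX2,
				      EXAMPLE_PCIE_DATA2, reg, v);
}
#endif
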
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
	amdgpu_asic_pre_asic_init(adev);

	return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

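/*
 * Illustrative sketch (an assumption, not driver code): a golden-settings
 * table consumed by the helper above.  The register offsets and masks are
 * hypothetical; real tables live in the per-asic files.
 */
#if 0	/* example only */
static const u32 example_golden_settings[] = {
	/* register,  and_mask,   or_mask */
	0x0000260c, 0xffffffff, 0x00000800, /* full overwrite */
	0x000009df, 0x00000003, 0x00000001, /* clear two bits, set bit 0 */
};

static void example_program_golden(struct amdgpu_device *adev)
{
	amdgpu_device_program_register_sequence(adev,
						example_golden_settings,
						ARRAY_SIZE(example_golden_settings));
}
#endif
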
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
	return pci_reset_function(adev->pdev);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{

	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	amdgpu_asic_init_doorbell_index(adev);

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     adev->doorbell_index.max_assignment + 1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	/* For Vega, reserve and map two pages on the doorbell BAR since the
	 * SDMA paging queue doorbell uses the second page. The
	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
	 * doorbells are in the first page, so with the paging queue enabled,
	 * num_doorbells needs to grow by one page (0x400 dwords).
	 */
	if (adev->asic_type >= CHIP_VEGA10)
		adev->doorbell.num_doorbells += 0x400;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

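/*
 * Illustrative sketch (an assumption, not driver code): allocating a
 * writeback slot, deriving its CPU and GPU addresses, and freeing it.
 * The returned index is already in dwords, see wb_get() above.
 */
#if 0	/* example only */
static int amdgpu_example_use_wb(struct amdgpu_device *adev)
{
	u32 wb;
	int r = amdgpu_device_wb_get(adev, &wb);

	if (r)
		return r;

	adev->wb.wb[wb] = 0;	/* CPU view of the slot */
	/* GPU address of the same slot: adev->wb.gpu_addr + wb * 4 */

	amdgpu_device_wb_free(adev, wb);
	return 0;
}
#endif
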
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* skip if the bios has already enabled large BAR */
	if (adev->gmc.real_vram_size &&
	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Limit the BAR size to what is available */
	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
			rbar_size);

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check at driver startup whether the asic has been initialized (all asics),
 * or whether post is needed because a hw reset was performed.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw still needs the driver to do a vPost, otherwise the gpu
		 * hangs. smc fw versions above 22.15 don't have this flaw, so we force
		 * vPost to be executed for smc versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	/* Don't post if we need to reset whole hive on init */
	if (adev->gmc.xgmi.pending_reset)
		return false;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
		bool state)
{
	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in the page table versus the
 * page directory; a page is 4KB, so we have 12 bits of offset, a minimum of
 * 9 bits in the page table, and the remaining bits in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8);
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		DRM_WARN("Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
		(amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		(amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		DRM_WARN("Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	DRM_WARN("Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}

static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
{
	if (!(adev->flags & AMD_IS_APU) ||
	    adev->asic_type < CHIP_RAVEN)
		return 0;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		break;
	case CHIP_RENOIR:
		if ((adev->pdev->device == 0x1636) ||
		    (adev->pdev->device == 0x164c))
			adev->apu_flags |= AMD_APU_IS_RENOIR;
		else
			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
		break;
	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case CHIP_YELLOW_CARP:
		break;
	case CHIP_CYAN_SKILLFISH:
		if (adev->pdev->device == 0x13FE)
			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	if (amdgpu_sched_hw_submission < 2) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = 2;
	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
	}

	amdgpu_device_check_smu_prv_buffer_size(adev);

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	amdgpu_gmc_tmz_set(adev);

	amdgpu_gmc_noretry_set(adev);

	return 0;
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver.  Suspends or resumes the
 * asic before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
					enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	int r;

	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		pci_set_power_state(pdev, PCI_D0);
		amdgpu_device_load_pci_state(pdev);
		r = pci_enable_device(pdev);
		if (r)
			DRM_WARN("pci_enable_device failed (%d)\n", r);
		amdgpu_device_resume(dev, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true);
		amdgpu_device_cache_pci_state(pdev);
		/* Shut down the device */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver.  Checks whether the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

1592 /**
1593  * amdgpu_device_ip_set_clockgating_state - set the CG state
1594  *
1595  * @dev: amdgpu_device pointer
1596  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1597  * @state: clockgating state (gate or ungate)
1598  *
1599  * Sets the requested clockgating state for all instances of
1600  * the hardware IP specified.
1601  * Returns the error code from the last instance.
1602  */
1603 int amdgpu_device_ip_set_clockgating_state(void *dev,
1604                                            enum amd_ip_block_type block_type,
1605                                            enum amd_clockgating_state state)
1606 {
1607         struct amdgpu_device *adev = dev;
1608         int i, r = 0;
1609
1610         for (i = 0; i < adev->num_ip_blocks; i++) {
1611                 if (!adev->ip_blocks[i].status.valid)
1612                         continue;
1613                 if (adev->ip_blocks[i].version->type != block_type)
1614                         continue;
1615                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1616                         continue;
1617                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1618                         (void *)adev, state);
1619                 if (r)
1620                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1621                                   adev->ip_blocks[i].version->funcs->name, r);
1622         }
1623         return r;
1624 }
1625
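/*
 * Example (hypothetical caller, for illustration only): gate the GFX
 * clocks of an initialized device and warn on failure:
 *
 *        r = amdgpu_device_ip_set_clockgating_state(adev,
 *                                                   AMD_IP_BLOCK_TYPE_GFX,
 *                                                   AMD_CG_STATE_GATE);
 *        if (r)
 *                dev_warn(adev->dev, "gating GFX clocks failed (%d)\n", r);
 */
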
1626 /**
1627  * amdgpu_device_ip_set_powergating_state - set the PG state
1628  *
1629  * @dev: amdgpu_device pointer
1630  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1631  * @state: powergating state (gate or ungate)
1632  *
1633  * Sets the requested powergating state for all instances of
1634  * the hardware IP specified.
1635  * Returns the error code from the last instance.
1636  */
1637 int amdgpu_device_ip_set_powergating_state(void *dev,
1638                                            enum amd_ip_block_type block_type,
1639                                            enum amd_powergating_state state)
1640 {
1641         struct amdgpu_device *adev = dev;
1642         int i, r = 0;
1643
1644         for (i = 0; i < adev->num_ip_blocks; i++) {
1645                 if (!adev->ip_blocks[i].status.valid)
1646                         continue;
1647                 if (adev->ip_blocks[i].version->type != block_type)
1648                         continue;
1649                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1650                         continue;
1651                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1652                         (void *)adev, state);
1653                 if (r)
1654                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1655                                   adev->ip_blocks[i].version->funcs->name, r);
1656         }
1657         return r;
1658 }
1659
1660 /**
1661  * amdgpu_device_ip_get_clockgating_state - get the CG state
1662  *
1663  * @adev: amdgpu_device pointer
1664  * @flags: clockgating feature flags
1665  *
1666  * Walks the list of IPs on the device and updates the clockgating
1667  * flags for each IP.
1668  * Updates @flags with the feature flags for each hardware IP where
1669  * clockgating is enabled.
1670  */
1671 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1672                                             u32 *flags)
1673 {
1674         int i;
1675
1676         for (i = 0; i < adev->num_ip_blocks; i++) {
1677                 if (!adev->ip_blocks[i].status.valid)
1678                         continue;
1679                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1680                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1681         }
1682 }
1683
1684 /**
1685  * amdgpu_device_ip_wait_for_idle - wait for idle
1686  *
1687  * @adev: amdgpu_device pointer
1688  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1689  *
1690  * Waits for the requested hardware IP to be idle.
1691  * Returns 0 for success or a negative error code on failure.
1692  */
1693 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1694                                    enum amd_ip_block_type block_type)
1695 {
1696         int i, r;
1697
1698         for (i = 0; i < adev->num_ip_blocks; i++) {
1699                 if (!adev->ip_blocks[i].status.valid)
1700                         continue;
1701                 if (adev->ip_blocks[i].version->type == block_type) {
1702                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1703                         if (r)
1704                                 return r;
1705                         break;
1706                 }
1707         }
1708         return 0;
1710 }
1711
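/*
 * Example (illustrative): block until the GMC reports idle before
 * reprogramming memory-controller state:
 *
 *        r = amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
 *        if (r)
 *                return r;
 */
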
1712 /**
1713  * amdgpu_device_ip_is_idle - is the hardware IP idle
1714  *
1715  * @adev: amdgpu_device pointer
1716  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1717  *
1718  * Check if the hardware IP is idle or not.
1719  * Returns true if the IP is idle, false if not.
1720  */
1721 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1722                               enum amd_ip_block_type block_type)
1723 {
1724         int i;
1725
1726         for (i = 0; i < adev->num_ip_blocks; i++) {
1727                 if (!adev->ip_blocks[i].status.valid)
1728                         continue;
1729                 if (adev->ip_blocks[i].version->type == block_type)
1730                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1731         }
1732         return true;
1734 }
1735
1736 /**
1737  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1738  *
1739  * @adev: amdgpu_device pointer
1740  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1741  *
1742  * Returns a pointer to the hardware IP block structure
1743  * if it exists for the asic, otherwise NULL.
1744  */
1745 struct amdgpu_ip_block *
1746 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1747                               enum amd_ip_block_type type)
1748 {
1749         int i;
1750
1751         for (i = 0; i < adev->num_ip_blocks; i++)
1752                 if (adev->ip_blocks[i].version->type == type)
1753                         return &adev->ip_blocks[i];
1754
1755         return NULL;
1756 }
1757
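/*
 * Example (illustrative): look up the GFX block and report its version:
 *
 *        struct amdgpu_ip_block *ip_block =
 *                amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
 *
 *        if (ip_block)
 *                DRM_INFO("GFX IP v%d.%d\n", ip_block->version->major,
 *                         ip_block->version->minor);
 */
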
1758 /**
1759  * amdgpu_device_ip_block_version_cmp
1760  *
1761  * @adev: amdgpu_device pointer
1762  * @type: enum amd_ip_block_type
1763  * @major: major version
1764  * @minor: minor version
1765  *
1766  * Returns 0 if the IP block version is equal to or greater than the
1767  * requested version, 1 if it is smaller or the ip_block doesn't exist.
1768  */
1769 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1770                                        enum amd_ip_block_type type,
1771                                        u32 major, u32 minor)
1772 {
1773         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1774
1775         if (ip_block && ((ip_block->version->major > major) ||
1776                         ((ip_block->version->major == major) &&
1777                         (ip_block->version->minor >= minor))))
1778                 return 0;
1779
1780         return 1;
1781 }
1782
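/*
 * Example (illustrative; the version numbers and the helper are made
 * up): gate a code path on the SMC block being at least version 7.1:
 *
 *        if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC,
 *                                                7, 1))
 *                do_smc_71_specific_setup(adev);
 */
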
1783 /**
1784  * amdgpu_device_ip_block_add
1785  *
1786  * @adev: amdgpu_device pointer
1787  * @ip_block_version: pointer to the IP to add
1788  *
1789  * Adds the IP block driver information to the collection of IPs
1790  * on the asic.
1791  */
1792 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1793                                const struct amdgpu_ip_block_version *ip_block_version)
1794 {
1795         if (!ip_block_version)
1796                 return -EINVAL;
1797
1798         switch (ip_block_version->type) {
1799         case AMD_IP_BLOCK_TYPE_VCN:
1800                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1801                         return 0;
1802                 break;
1803         case AMD_IP_BLOCK_TYPE_JPEG:
1804                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1805                         return 0;
1806                 break;
1807         default:
1808                 break;
1809         }
1810
1811         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1812                   ip_block_version->funcs->name);
1813
1814         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1815
1816         return 0;
1817 }
1818
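/*
 * Example (illustrative): the per-ASIC set_ip_blocks() code builds up
 * the list with repeated calls like this, one per IP block definition
 * exported by the SoC files:
 *
 *        r = amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 *        if (r)
 *                return r;
 */
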
1819 /**
1820  * amdgpu_device_enable_virtual_display - enable virtual display feature
1821  *
1822  * @adev: amdgpu_device pointer
1823  *
1824  * Enables the virtual display feature if the user has enabled it via
1825  * the module parameter virtual_display.  This feature provides a virtual
1826  * display hardware on headless boards or in virtualized environments.
1827  * This function parses and validates the configuration string specified by
1828  * the user and configures the virtual display configuration (number of
1829  * virtual connectors, crtcs, etc.) specified.
1830  */
1831 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1832 {
1833         adev->enable_virtual_display = false;
1834
1835         if (amdgpu_virtual_display) {
1836                 const char *pci_address_name = pci_name(adev->pdev);
1837                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1838
1839                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1840                 pciaddstr_tmp = pciaddstr;
1841                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1842                         pciaddname = strsep(&pciaddname_tmp, ",");
1843                         if (!strcmp("all", pciaddname)
1844                             || !strcmp(pci_address_name, pciaddname)) {
1845                                 long num_crtc;
1846                                 int res = -1;
1847
1848                                 adev->enable_virtual_display = true;
1849
1850                                 if (pciaddname_tmp)
1851                                         res = kstrtol(pciaddname_tmp, 10,
1852                                                       &num_crtc);
1853
1854                                 if (!res) {
1855                                         if (num_crtc < 1)
1856                                                 num_crtc = 1;
1857                                         if (num_crtc > 6)
1858                                                 num_crtc = 6;
1859                                         adev->mode_info.num_crtc = num_crtc;
1860                                 } else {
1861                                         adev->mode_info.num_crtc = 1;
1862                                 }
1863                                 break;
1864                         }
1865                 }
1866
1867                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1868                          amdgpu_virtual_display, pci_address_name,
1869                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1870
1871                 kfree(pciaddstr);
1872         }
1873 }
1874
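/*
 * The string format accepted above is one entry per device, separated
 * by ';', each entry being "<pci address>[,<num crtcs>]" or the keyword
 * "all"; the crtc count is clamped to 1-6. Example (the addresses are
 * illustrative):
 *
 *        modprobe amdgpu virtual_display="0000:04:00.0,2;0000:05:00.0,1"
 *
 * or, to enable a single virtual crtc on every amdgpu device:
 *
 *        modprobe amdgpu virtual_display=all,1
 */
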
1875 /**
1876  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1877  *
1878  * @adev: amdgpu_device pointer
1879  *
1880  * Parses the asic configuration parameters specified in the gpu info
1881  * firmware and makes them available to the driver for use in configuring
1882  * the asic.
1883  * Returns 0 on success, negative error code on failure.
1884  */
1885 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1886 {
1887         const char *chip_name;
1888         char fw_name[40];
1889         int err;
1890         const struct gpu_info_firmware_header_v1_0 *hdr;
1891
1892         adev->firmware.gpu_info_fw = NULL;
1893
1894         if (adev->mman.discovery_bin) {
1895                 amdgpu_discovery_get_gfx_info(adev);
1896
1897                 /*
1898                  * FIXME: The bounding box is still needed by Navi12, so
1899  * temporarily read it from gpu_info firmware. Should be dropped
1900                  * when DAL no longer needs it.
1901                  */
1902                 if (adev->asic_type != CHIP_NAVI12)
1903                         return 0;
1904         }
1905
1906         switch (adev->asic_type) {
1907 #ifdef CONFIG_DRM_AMDGPU_SI
1908         case CHIP_VERDE:
1909         case CHIP_TAHITI:
1910         case CHIP_PITCAIRN:
1911         case CHIP_OLAND:
1912         case CHIP_HAINAN:
1913 #endif
1914 #ifdef CONFIG_DRM_AMDGPU_CIK
1915         case CHIP_BONAIRE:
1916         case CHIP_HAWAII:
1917         case CHIP_KAVERI:
1918         case CHIP_KABINI:
1919         case CHIP_MULLINS:
1920 #endif
1921         case CHIP_TOPAZ:
1922         case CHIP_TONGA:
1923         case CHIP_FIJI:
1924         case CHIP_POLARIS10:
1925         case CHIP_POLARIS11:
1926         case CHIP_POLARIS12:
1927         case CHIP_VEGAM:
1928         case CHIP_CARRIZO:
1929         case CHIP_STONEY:
1930         case CHIP_VEGA20:
1931         case CHIP_ALDEBARAN:
1932         case CHIP_SIENNA_CICHLID:
1933         case CHIP_NAVY_FLOUNDER:
1934         case CHIP_DIMGREY_CAVEFISH:
1935         case CHIP_BEIGE_GOBY:
1936         default:
1937                 return 0;
1938         case CHIP_VEGA10:
1939                 chip_name = "vega10";
1940                 break;
1941         case CHIP_VEGA12:
1942                 chip_name = "vega12";
1943                 break;
1944         case CHIP_RAVEN:
1945                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1946                         chip_name = "raven2";
1947                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1948                         chip_name = "picasso";
1949                 else
1950                         chip_name = "raven";
1951                 break;
1952         case CHIP_ARCTURUS:
1953                 chip_name = "arcturus";
1954                 break;
1955         case CHIP_RENOIR:
1956                 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1957                         chip_name = "renoir";
1958                 else
1959                         chip_name = "green_sardine";
1960                 break;
1961         case CHIP_NAVI10:
1962                 chip_name = "navi10";
1963                 break;
1964         case CHIP_NAVI14:
1965                 chip_name = "navi14";
1966                 break;
1967         case CHIP_NAVI12:
1968                 chip_name = "navi12";
1969                 break;
1970         case CHIP_VANGOGH:
1971                 chip_name = "vangogh";
1972                 break;
1973         case CHIP_YELLOW_CARP:
1974                 chip_name = "yellow_carp";
1975                 break;
1976         }
1977
1978         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1979         err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1980         if (err) {
1981                 dev_err(adev->dev,
1982                         "Failed to load gpu_info firmware \"%s\"\n",
1983                         fw_name);
1984                 goto out;
1985         }
1986         err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1987         if (err) {
1988                 dev_err(adev->dev,
1989                         "Failed to validate gpu_info firmware \"%s\"\n",
1990                         fw_name);
1991                 goto out;
1992         }
1993
1994         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1995         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1996
1997         switch (hdr->version_major) {
1998         case 1:
1999         {
2000                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2001                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2002                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2003
2004                 /*
2005  * Should be dropped when DAL no longer needs it.
2006                  */
2007                 if (adev->asic_type == CHIP_NAVI12)
2008                         goto parse_soc_bounding_box;
2009
2010                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2011                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2012                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2013                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2014                 adev->gfx.config.max_texture_channel_caches =
2015                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
2016                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2017                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2018                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2019                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2020                 adev->gfx.config.double_offchip_lds_buf =
2021                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2022                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2023                 adev->gfx.cu_info.max_waves_per_simd =
2024                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2025                 adev->gfx.cu_info.max_scratch_slots_per_cu =
2026                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2027                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2028                 if (hdr->version_minor >= 1) {
2029                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2030                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2031                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2032                         adev->gfx.config.num_sc_per_sh =
2033                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2034                         adev->gfx.config.num_packer_per_sc =
2035                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2036                 }
2037
2038 parse_soc_bounding_box:
2039                 /*
2040  * soc bounding box info is not integrated in the discovery table,
2041  * so we always need to parse it from the gpu info firmware when needed.
2042                  */
2043                 if (hdr->version_minor == 2) {
2044                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2045                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2046                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2047                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2048                 }
2049                 break;
2050         }
2051         default:
2052                 dev_err(adev->dev,
2053                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2054                 err = -EINVAL;
2055                 goto out;
2056         }
2057 out:
2058         return err;
2059 }
2060
2061 /**
2062  * amdgpu_device_ip_early_init - run early init for hardware IPs
2063  *
2064  * @adev: amdgpu_device pointer
2065  *
2066  * Early initialization pass for hardware IPs.  The hardware IPs that make
2067  * up each asic are discovered and each IP's early_init callback is run.  This
2068  * is the first stage in initializing the asic.
2069  * Returns 0 on success, negative error code on failure.
2070  */
2071 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2072 {
2073         int i, r;
2074
2075         amdgpu_device_enable_virtual_display(adev);
2076
2077         if (amdgpu_sriov_vf(adev)) {
2078                 r = amdgpu_virt_request_full_gpu(adev, true);
2079                 if (r)
2080                         return r;
2081         }
2082
2083         switch (adev->asic_type) {
2084 #ifdef CONFIG_DRM_AMDGPU_SI
2085         case CHIP_VERDE:
2086         case CHIP_TAHITI:
2087         case CHIP_PITCAIRN:
2088         case CHIP_OLAND:
2089         case CHIP_HAINAN:
2090                 adev->family = AMDGPU_FAMILY_SI;
2091                 r = si_set_ip_blocks(adev);
2092                 if (r)
2093                         return r;
2094                 break;
2095 #endif
2096 #ifdef CONFIG_DRM_AMDGPU_CIK
2097         case CHIP_BONAIRE:
2098         case CHIP_HAWAII:
2099         case CHIP_KAVERI:
2100         case CHIP_KABINI:
2101         case CHIP_MULLINS:
2102                 if (adev->flags & AMD_IS_APU)
2103                         adev->family = AMDGPU_FAMILY_KV;
2104                 else
2105                         adev->family = AMDGPU_FAMILY_CI;
2106
2107                 r = cik_set_ip_blocks(adev);
2108                 if (r)
2109                         return r;
2110                 break;
2111 #endif
2112         case CHIP_TOPAZ:
2113         case CHIP_TONGA:
2114         case CHIP_FIJI:
2115         case CHIP_POLARIS10:
2116         case CHIP_POLARIS11:
2117         case CHIP_POLARIS12:
2118         case CHIP_VEGAM:
2119         case CHIP_CARRIZO:
2120         case CHIP_STONEY:
2121                 if (adev->flags & AMD_IS_APU)
2122                         adev->family = AMDGPU_FAMILY_CZ;
2123                 else
2124                         adev->family = AMDGPU_FAMILY_VI;
2125
2126                 r = vi_set_ip_blocks(adev);
2127                 if (r)
2128                         return r;
2129                 break;
2130         default:
2131                 r = amdgpu_discovery_set_ip_blocks(adev);
2132                 if (r)
2133                         return r;
2134                 break;
2135         }
2136
2137         amdgpu_amdkfd_device_probe(adev);
2138
2139         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2140         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2141                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2142         if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2143                 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2144
2145         for (i = 0; i < adev->num_ip_blocks; i++) {
2146                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2147                         DRM_ERROR("disabled ip block: %d <%s>\n",
2148                                   i, adev->ip_blocks[i].version->funcs->name);
2149                         adev->ip_blocks[i].status.valid = false;
2150                 } else {
2151                         if (adev->ip_blocks[i].version->funcs->early_init) {
2152                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2153                                 if (r == -ENOENT) {
2154                                         adev->ip_blocks[i].status.valid = false;
2155                                 } else if (r) {
2156                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2157                                                   adev->ip_blocks[i].version->funcs->name, r);
2158                                         return r;
2159                                 } else {
2160                                         adev->ip_blocks[i].status.valid = true;
2161                                 }
2162                         } else {
2163                                 adev->ip_blocks[i].status.valid = true;
2164                         }
2165                 }
2166                 /* get the vbios after the asic_funcs are set up */
2167                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2168                         r = amdgpu_device_parse_gpu_info_fw(adev);
2169                         if (r)
2170                                 return r;
2171
2172                         /* Read BIOS */
2173                         if (!amdgpu_get_bios(adev))
2174                                 return -EINVAL;
2175
2176                         r = amdgpu_atombios_init(adev);
2177                         if (r) {
2178                                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2179                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2180                                 return r;
2181                         }
2182
2183                         /* get pf2vf msg info at its earliest time */
2184                         if (amdgpu_sriov_vf(adev))
2185                                 amdgpu_virt_init_data_exchange(adev);
2186
2187                 }
2188         }
2189
2190         adev->cg_flags &= amdgpu_cg_mask;
2191         adev->pg_flags &= amdgpu_pg_mask;
2192
2193         return 0;
2194 }
2195
2196 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2197 {
2198         int i, r;
2199
2200         for (i = 0; i < adev->num_ip_blocks; i++) {
2201                 if (!adev->ip_blocks[i].status.sw)
2202                         continue;
2203                 if (adev->ip_blocks[i].status.hw)
2204                         continue;
2205                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2206                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2207                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2208                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2209                         if (r) {
2210                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2211                                           adev->ip_blocks[i].version->funcs->name, r);
2212                                 return r;
2213                         }
2214                         adev->ip_blocks[i].status.hw = true;
2215                 }
2216         }
2217
2218         return 0;
2219 }
2220
2221 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2222 {
2223         int i, r;
2224
2225         for (i = 0; i < adev->num_ip_blocks; i++) {
2226                 if (!adev->ip_blocks[i].status.sw)
2227                         continue;
2228                 if (adev->ip_blocks[i].status.hw)
2229                         continue;
2230                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2231                 if (r) {
2232                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2233                                   adev->ip_blocks[i].version->funcs->name, r);
2234                         return r;
2235                 }
2236                 adev->ip_blocks[i].status.hw = true;
2237         }
2238
2239         return 0;
2240 }
2241
2242 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2243 {
2244         int r = 0;
2245         int i;
2246         uint32_t smu_version;
2247
2248         if (adev->asic_type >= CHIP_VEGA10) {
2249                 for (i = 0; i < adev->num_ip_blocks; i++) {
2250                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2251                                 continue;
2252
2253                         if (!adev->ip_blocks[i].status.sw)
2254                                 continue;
2255
2256                         /* no need to do the fw loading again if already done */
2257                         if (adev->ip_blocks[i].status.hw)
2258                                 break;
2259
2260                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2261                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2262                                 if (r) {
2263                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2264                                                           adev->ip_blocks[i].version->funcs->name, r);
2265                                         return r;
2266                                 }
2267                         } else {
2268                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2269                                 if (r) {
2270                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2271                                                           adev->ip_blocks[i].version->funcs->name, r);
2272                                         return r;
2273                                 }
2274                         }
2275
2276                         adev->ip_blocks[i].status.hw = true;
2277                         break;
2278                 }
2279         }
2280
2281         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2282                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2283
2284         return r;
2285 }
2286
2287 /**
2288  * amdgpu_device_ip_init - run init for hardware IPs
2289  *
2290  * @adev: amdgpu_device pointer
2291  *
2292  * Main initialization pass for hardware IPs.  The list of all the hardware
2293  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2294  * are run.  sw_init initializes the software state associated with each IP
2295  * and hw_init initializes the hardware associated with each IP.
2296  * Returns 0 on success, negative error code on failure.
2297  */
2298 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2299 {
2300         int i, r;
2301
2302         r = amdgpu_ras_init(adev);
2303         if (r)
2304                 return r;
2305
2306         for (i = 0; i < adev->num_ip_blocks; i++) {
2307                 if (!adev->ip_blocks[i].status.valid)
2308                         continue;
2309                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2310                 if (r) {
2311                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2312                                   adev->ip_blocks[i].version->funcs->name, r);
2313                         goto init_failed;
2314                 }
2315                 adev->ip_blocks[i].status.sw = true;
2316
2317                 /* need to do gmc hw init early so we can allocate gpu mem */
2318                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2319                         r = amdgpu_device_vram_scratch_init(adev);
2320                         if (r) {
2321                                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2322                                 goto init_failed;
2323                         }
2324                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2325                         if (r) {
2326                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2327                                 goto init_failed;
2328                         }
2329                         r = amdgpu_device_wb_init(adev);
2330                         if (r) {
2331                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2332                                 goto init_failed;
2333                         }
2334                         adev->ip_blocks[i].status.hw = true;
2335
2336                         /* right after GMC hw init, we create CSA */
2337                         if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2338                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2339                                                                 AMDGPU_GEM_DOMAIN_VRAM,
2340                                                                 AMDGPU_CSA_SIZE);
2341                                 if (r) {
2342                                         DRM_ERROR("allocate CSA failed %d\n", r);
2343                                         goto init_failed;
2344                                 }
2345                         }
2346                 }
2347         }
2348
2349         if (amdgpu_sriov_vf(adev))
2350                 amdgpu_virt_init_data_exchange(adev);
2351
2352         r = amdgpu_ib_pool_init(adev);
2353         if (r) {
2354                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2355                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2356                 goto init_failed;
2357         }
2358
2359         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init completes */
2360         if (r)
2361                 goto init_failed;
2362
2363         r = amdgpu_device_ip_hw_init_phase1(adev);
2364         if (r)
2365                 goto init_failed;
2366
2367         r = amdgpu_device_fw_loading(adev);
2368         if (r)
2369                 goto init_failed;
2370
2371         r = amdgpu_device_ip_hw_init_phase2(adev);
2372         if (r)
2373                 goto init_failed;
2374
2375         /*
2376          * Retired pages will be loaded from eeprom and reserved here.
2377          * This must be called after amdgpu_device_ip_hw_init_phase2, since
2378          * for some ASICs the RAS EEPROM code relies on the SMU being fully
2379          * functional for I2C communication, which is only true at this point.
2380          *
2381          * amdgpu_ras_recovery_init may fail, but the caller only cares about
2382          * failures caused by a bad GPU state, and stops the amdgpu init
2383          * process accordingly. For other failures, it still releases all
2384          * resources and prints an error message rather than returning a
2385          * negative value to the upper level.
2386          *
2387          * Note: theoretically, this should be called before all vram allocations
2388          * to protect retired pages from being reused.
2389          */
2390         r = amdgpu_ras_recovery_init(adev);
2391         if (r)
2392                 goto init_failed;
2393
2394         if (adev->gmc.xgmi.num_physical_nodes > 1)
2395                 amdgpu_xgmi_add_device(adev);
2396
2397         /* Don't init kfd if the whole hive needs to be reset during init */
2398         if (!adev->gmc.xgmi.pending_reset)
2399                 amdgpu_amdkfd_device_init(adev);
2400
2401         amdgpu_fru_get_product_info(adev);
2402
2403 init_failed:
2404         if (amdgpu_sriov_vf(adev))
2405                 amdgpu_virt_release_full_gpu(adev, true);
2406
2407         return r;
2408 }
2409
2410 /**
2411  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2412  *
2413  * @adev: amdgpu_device pointer
2414  *
2415  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2416  * this function before a GPU reset.  If the value is retained after a
2417  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2418  */
2419 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2420 {
2421         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2422 }
2423
2424 /**
2425  * amdgpu_device_check_vram_lost - check if vram is valid
2426  *
2427  * @adev: amdgpu_device pointer
2428  *
2429  * Checks the reset magic value written to the gart pointer in VRAM.
2430  * The driver calls this after a GPU reset to see if the contents of
2431  * VRAM have been lost or not.
2432  * Returns true if vram is lost, false if not.
2433  */
2434 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2435 {
2436         if (memcmp(adev->gart.ptr, adev->reset_magic,
2437                         AMDGPU_RESET_MAGIC_NUM))
2438                 return true;
2439
2440         if (!amdgpu_in_reset(adev))
2441                 return false;
2442
2443         /*
2444          * For all ASICs with baco/mode1 reset, the VRAM is
2445          * always assumed to be lost.
2446          */
2447         switch (amdgpu_asic_reset_method(adev)) {
2448         case AMD_RESET_METHOD_BACO:
2449         case AMD_RESET_METHOD_MODE1:
2450                 return true;
2451         default:
2452                 return false;
2453         }
2454 }
2455
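/*
 * Sketch (illustrative) of how the reset-magic pair above is intended
 * to be used around an asic reset:
 *
 *        amdgpu_device_fill_reset_magic(adev);
 *        ... perform the asic reset ...
 *        if (amdgpu_device_check_vram_lost(adev))
 *                DRM_INFO("VRAM is lost due to GPU reset!\n");
 */
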
2456 /**
2457  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2458  *
2459  * @adev: amdgpu_device pointer
2460  * @state: clockgating state (gate or ungate)
2461  *
2462  * The list of all the hardware IPs that make up the asic is walked and the
2463  * set_clockgating_state callbacks are run.
2464  * The late init pass enables clockgating for hardware IPs, while the
2465  * fini and suspend passes disable it.
2466  * Returns 0 on success, negative error code on failure.
2467  */
2468
2469 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2470                                enum amd_clockgating_state state)
2471 {
2472         int i, j, r;
2473
2474         if (amdgpu_emu_mode == 1)
2475                 return 0;
2476
2477         for (j = 0; j < adev->num_ip_blocks; j++) {
2478                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2479                 if (!adev->ip_blocks[i].status.late_initialized)
2480                         continue;
2481                 /* skip CG for GFX on S0ix */
2482                 if (adev->in_s0ix &&
2483                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2484                         continue;
2485                 /* skip CG for VCE/UVD, it's handled specially */
2486                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2487                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2488                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2489                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2490                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2491                         /* enable clockgating to save power */
2492                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2493                                                                                      state);
2494                         if (r) {
2495                                 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2496                                           adev->ip_blocks[i].version->funcs->name, r);
2497                                 return r;
2498                         }
2499                 }
2500         }
2501
2502         return 0;
2503 }
2504
2505 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2506                                enum amd_powergating_state state)
2507 {
2508         int i, j, r;
2509
2510         if (amdgpu_emu_mode == 1)
2511                 return 0;
2512
2513         for (j = 0; j < adev->num_ip_blocks; j++) {
2514                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2515                 if (!adev->ip_blocks[i].status.late_initialized)
2516                         continue;
2517                 /* skip PG for GFX on S0ix */
2518                 if (adev->in_s0ix &&
2519                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2520                         continue;
2521                 /* skip PG for VCE/UVD, it's handled specially */
2522                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2523                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2524                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2525                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2526                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2527                         /* enable powergating to save power */
2528                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2529                                                                                         state);
2530                         if (r) {
2531                                 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2532                                           adev->ip_blocks[i].version->funcs->name, r);
2533                                 return r;
2534                         }
2535                 }
2536         }
2537         return 0;
2538 }
2539
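/*
 * Sketch (illustrative) of how the two helpers above pair up: late init
 * gates CG and then PG, while fini/suspend ungates in the opposite
 * order, mirroring the calls elsewhere in this file:
 *
 *        amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
 *        amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
 *        ...
 *        amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
 *        amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 */
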
2540 static int amdgpu_device_enable_mgpu_fan_boost(void)
2541 {
2542         struct amdgpu_gpu_instance *gpu_ins;
2543         struct amdgpu_device *adev;
2544         int i, ret = 0;
2545
2546         mutex_lock(&mgpu_info.mutex);
2547
2548         /*
2549          * MGPU fan boost feature should be enabled
2550          * only when there are two or more dGPUs in
2551          * the system
2552          */
2553         if (mgpu_info.num_dgpu < 2)
2554                 goto out;
2555
2556         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2557                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2558                 adev = gpu_ins->adev;
2559                 if (!(adev->flags & AMD_IS_APU) &&
2560                     !gpu_ins->mgpu_fan_enabled) {
2561                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2562                         if (ret)
2563                                 break;
2564
2565                         gpu_ins->mgpu_fan_enabled = 1;
2566                 }
2567         }
2568
2569 out:
2570         mutex_unlock(&mgpu_info.mutex);
2571
2572         return ret;
2573 }
2574
2575 /**
2576  * amdgpu_device_ip_late_init - run late init for hardware IPs
2577  *
2578  * @adev: amdgpu_device pointer
2579  *
2580  * Late initialization pass for hardware IPs.  The list of all the hardware
2581  * IPs that make up the asic is walked and the late_init callbacks are run.
2582  * late_init covers any special initialization that an IP requires
2583  * after all of the IPs have been initialized or something that needs to happen
2584  * late in the init process.
2585  * Returns 0 on success, negative error code on failure.
2586  */
2587 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2588 {
2589         struct amdgpu_gpu_instance *gpu_instance;
2590         int i = 0, r;
2591
2592         for (i = 0; i < adev->num_ip_blocks; i++) {
2593                 if (!adev->ip_blocks[i].status.hw)
2594                         continue;
2595                 if (adev->ip_blocks[i].version->funcs->late_init) {
2596                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2597                         if (r) {
2598                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2599                                           adev->ip_blocks[i].version->funcs->name, r);
2600                                 return r;
2601                         }
2602                 }
2603                 adev->ip_blocks[i].status.late_initialized = true;
2604         }
2605
2606         amdgpu_ras_set_error_query_ready(adev, true);
2607
2608         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2609         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2610
2611         amdgpu_device_fill_reset_magic(adev);
2612
2613         r = amdgpu_device_enable_mgpu_fan_boost();
2614         if (r)
2615                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2616
2617         /* For XGMI + passthrough configuration on arcturus, enable light SBR */
2618         if (adev->asic_type == CHIP_ARCTURUS &&
2619             amdgpu_passthrough(adev) &&
2620             adev->gmc.xgmi.num_physical_nodes > 1)
2621                 smu_set_light_sbr(&adev->smu, true);
2622
2623         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2624                 mutex_lock(&mgpu_info.mutex);
2625
2626                 /*
2627                  * Reset the device p-state to low, as it was booted high.
2628                  *
2629                  * This should be performed only after all devices from the
2630                  * same hive have been initialized.
2631                  *
2632                  * However, the number of devices in a hive is not known in
2633                  * advance; it is counted one by one as the devices initialize.
2634                  *
2635                  * So we wait until all XGMI-linked devices have initialized.
2636                  * This may add some delay, as those devices may come from
2637                  * different hives. But that should be OK.
2638                  */
2639                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2640                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2641                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2642                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2643                                         continue;
2644
2645                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2646                                                 AMDGPU_XGMI_PSTATE_MIN);
2647                                 if (r) {
2648                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2649                                         break;
2650                                 }
2651                         }
2652                 }
2653
2654                 mutex_unlock(&mgpu_info.mutex);
2655         }
2656
2657         return 0;
2658 }
2659
2660 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2661 {
2662         int i, r;
2663
2664         for (i = 0; i < adev->num_ip_blocks; i++) {
2665                 if (!adev->ip_blocks[i].version->funcs->early_fini)
2666                         continue;
2667
2668                 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2669                 if (r) {
2670                         DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2671                                   adev->ip_blocks[i].version->funcs->name, r);
2672                 }
2673         }
2674
2675         amdgpu_amdkfd_suspend(adev, false);
2676
2677         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2678         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2679
2680         /* need to disable SMC first */
2681         for (i = 0; i < adev->num_ip_blocks; i++) {
2682                 if (!adev->ip_blocks[i].status.hw)
2683                         continue;
2684                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2685                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2686                         /* XXX handle errors */
2687                         if (r) {
2688                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2689                                           adev->ip_blocks[i].version->funcs->name, r);
2690                         }
2691                         adev->ip_blocks[i].status.hw = false;
2692                         break;
2693                 }
2694         }
2695
2696         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2697                 if (!adev->ip_blocks[i].status.hw)
2698                         continue;
2699
2700                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2701                 /* XXX handle errors */
2702                 if (r) {
2703                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2704                                   adev->ip_blocks[i].version->funcs->name, r);
2705                 }
2706
2707                 adev->ip_blocks[i].status.hw = false;
2708         }
2709
2710         if (amdgpu_sriov_vf(adev)) {
2711                 if (amdgpu_virt_release_full_gpu(adev, false))
2712                         DRM_ERROR("failed to release exclusive mode on fini\n");
2713         }
2714
2715         return 0;
2716 }
2717
2718 /**
2719  * amdgpu_device_ip_fini - run fini for hardware IPs
2720  *
2721  * @adev: amdgpu_device pointer
2722  *
2723  * Main teardown pass for hardware IPs.  The list of all the hardware
2724  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2725  * are run.  hw_fini tears down the hardware associated with each IP
2726  * and sw_fini tears down any software state associated with each IP.
2727  * Returns 0 on success, negative error code on failure.
2728  */
2729 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2730 {
2731         int i, r;
2732
2733         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2734                 amdgpu_virt_release_ras_err_handler_data(adev);
2735
2736         amdgpu_ras_pre_fini(adev);
2737
2738         if (adev->gmc.xgmi.num_physical_nodes > 1)
2739                 amdgpu_xgmi_remove_device(adev);
2740
2741         amdgpu_amdkfd_device_fini_sw(adev);
2742
2743         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2744                 if (!adev->ip_blocks[i].status.sw)
2745                         continue;
2746
2747                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2748                         amdgpu_ucode_free_bo(adev);
2749                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2750                         amdgpu_device_wb_fini(adev);
2751                         amdgpu_device_vram_scratch_fini(adev);
2752                         amdgpu_ib_pool_fini(adev);
2753                 }
2754
2755                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2756                 /* XXX handle errors */
2757                 if (r) {
2758                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2759                                   adev->ip_blocks[i].version->funcs->name, r);
2760                 }
2761                 adev->ip_blocks[i].status.sw = false;
2762                 adev->ip_blocks[i].status.valid = false;
2763         }
2764
2765         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2766                 if (!adev->ip_blocks[i].status.late_initialized)
2767                         continue;
2768                 if (adev->ip_blocks[i].version->funcs->late_fini)
2769                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2770                 adev->ip_blocks[i].status.late_initialized = false;
2771         }
2772
2773         amdgpu_ras_fini(adev);
2774
2775         return 0;
2776 }
2777
2778 /**
2779  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2780  *
2781  * @work: work_struct.
2782  */
2783 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2784 {
2785         struct amdgpu_device *adev =
2786                 container_of(work, struct amdgpu_device, delayed_init_work.work);
2787         int r;
2788
2789         r = amdgpu_ib_ring_tests(adev);
2790         if (r)
2791                 DRM_ERROR("ib ring test failed (%d).\n", r);
2792 }
2793
2794 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2795 {
2796         struct amdgpu_device *adev =
2797                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2798
2799         WARN_ON_ONCE(adev->gfx.gfx_off_state);
2800         WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2801
2802         if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2803                 adev->gfx.gfx_off_state = true;
2804 }
2805
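/*
 * Sketch (illustrative; the delay value is made up) of how this handler
 * is driven: the gfx-off request path arms it as delayed work instead
 * of enabling GFXOFF synchronously:
 *
 *        schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
 *                              msecs_to_jiffies(100));
 */
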
2806 /**
2807  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2808  *
2809  * @adev: amdgpu_device pointer
2810  *
2811  * Main suspend function for hardware IPs.  The list of all the hardware
2812  * IPs that make up the asic is walked, clockgating is disabled and the
2813  * suspend callbacks are run.  suspend puts the hardware and software state
2814  * in each IP into a state suitable for suspend.
2815  * Returns 0 on success, negative error code on failure.
2816  */
2817 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2818 {
2819         int i, r;
2820
2821         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2822         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2823
2824         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2825                 if (!adev->ip_blocks[i].status.valid)
2826                         continue;
2827
2828                 /* displays are handled separately */
2829                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2830                         continue;
2831
2833                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2834                 /* XXX handle errors */
2835                 if (r) {
2836                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2837                                   adev->ip_blocks[i].version->funcs->name, r);
2838                         return r;
2839                 }
2840
2841                 adev->ip_blocks[i].status.hw = false;
2842         }
2843
2844         return 0;
2845 }
2846
2847 /**
2848  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2849  *
2850  * @adev: amdgpu_device pointer
2851  *
2852  * Main suspend function for hardware IPs.  The list of all the hardware
2853  * IPs that make up the asic is walked, clockgating is disabled and the
2854  * suspend callbacks are run.  suspend puts the hardware and software state
2855  * in each IP into a state suitable for suspend.
2856  * Returns 0 on success, negative error code on failure.
2857  */
2858 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2859 {
2860         int i, r;
2861
2862         if (adev->in_s0ix)
2863                 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
2864
2865         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2866                 if (!adev->ip_blocks[i].status.valid)
2867                         continue;
2868                 /* displays are handled in phase1 */
2869                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2870                         continue;
2871                 /* PSP lost connection when err_event_athub occurs */
2872                 if (amdgpu_ras_intr_triggered() &&
2873                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2874                         adev->ip_blocks[i].status.hw = false;
2875                         continue;
2876                 }
2877
2878                 /* skip unnecessary suspend if we have not initialized them yet */
2879                 if (adev->gmc.xgmi.pending_reset &&
2880                     !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2881                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2882                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2883                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2884                         adev->ip_blocks[i].status.hw = false;
2885                         continue;
2886                 }
2887
2888                 /* skip suspend of gfx and psp for S0ix
2889                  * gfx is in gfxoff state, so on resume it will exit gfxoff just
2890                  * like at runtime. PSP is also part of the always-on hardware,
2891                  * so there is no need to suspend it.
2892                  */
2893                 if (adev->in_s0ix &&
2894                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
2895                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
2896                         continue;
2897
2898                 /* XXX handle errors */
2899                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2900                 /* XXX handle errors */
2901                 if (r) {
2902                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2903                                   adev->ip_blocks[i].version->funcs->name, r);
2904                 }
2905                 adev->ip_blocks[i].status.hw = false;
2906                 /* handle putting the SMC in the appropriate state */
2907                 if (!amdgpu_sriov_vf(adev)) {
2908                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2909                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2910                                 if (r) {
2911                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2912                                                         adev->mp1_state, r);
2913                                         return r;
2914                                 }
2915                         }
2916                 }
2917         }
2918
2919         return 0;
2920 }
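
/*
 * Illustrative sketch, not part of this file: each IP block exposes its
 * suspend/resume hooks through struct amd_ip_funcs, and the loop above
 * simply walks the registered blocks in reverse order invoking .suspend.
 * A block is typically wired up along these lines (names hypothetical):
 *
 *     static const struct amd_ip_funcs foo_ip_funcs = {
 *             .name = "foo",
 *             .suspend = foo_suspend,
 *             .resume = foo_resume,
 *     };
 */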
2921
2922 /**
2923  * amdgpu_device_ip_suspend - run suspend for hardware IPs
2924  *
2925  * @adev: amdgpu_device pointer
2926  *
2927  * Main suspend function for hardware IPs.  The list of all the hardware
2928  * IPs that make up the asic is walked, clockgating is disabled and the
2929  * suspend callbacks are run.  suspend puts the hardware and software state
2930  * in each IP into a state suitable for suspend.
2931  * Returns 0 on success, negative error code on failure.
2932  */
2933 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2934 {
2935         int r;
2936
2937         if (amdgpu_sriov_vf(adev)) {
2938                 amdgpu_virt_fini_data_exchange(adev);
2939                 amdgpu_virt_request_full_gpu(adev, false);
2940         }
2941
2942         r = amdgpu_device_ip_suspend_phase1(adev);
2943         if (r)
2944                 return r;
2945         r = amdgpu_device_ip_suspend_phase2(adev);
2946
2947         if (amdgpu_sriov_vf(adev))
2948                 amdgpu_virt_release_full_gpu(adev, false);
2949
2950         return r;
2951 }
2952
2953 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2954 {
2955         int i, r;
2956
2957         static enum amd_ip_block_type ip_order[] = {
2958                 AMD_IP_BLOCK_TYPE_GMC,
2959                 AMD_IP_BLOCK_TYPE_COMMON,
2960                 AMD_IP_BLOCK_TYPE_PSP,
2961                 AMD_IP_BLOCK_TYPE_IH,
2962         };
2963
2964         for (i = 0; i < adev->num_ip_blocks; i++) {
2965                 int j;
2966                 struct amdgpu_ip_block *block;
2967
2968                 block = &adev->ip_blocks[i];
2969                 block->status.hw = false;
2970
2971                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
2972
2973                         if (block->version->type != ip_order[j] ||
2974                                 !block->status.valid)
2975                                 continue;
2976
2977                         r = block->version->funcs->hw_init(adev);
2978                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2979                         if (r)
2980                                 return r;
2981                         block->status.hw = true;
2982                 }
2983         }
2984
2985         return 0;
2986 }
2987
2988 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2989 {
2990         int i, r;
2991
2992         static enum amd_ip_block_type ip_order[] = {
2993                 AMD_IP_BLOCK_TYPE_SMC,
2994                 AMD_IP_BLOCK_TYPE_DCE,
2995                 AMD_IP_BLOCK_TYPE_GFX,
2996                 AMD_IP_BLOCK_TYPE_SDMA,
2997                 AMD_IP_BLOCK_TYPE_UVD,
2998                 AMD_IP_BLOCK_TYPE_VCE,
2999                 AMD_IP_BLOCK_TYPE_VCN
3000         };
3001
3002         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3003                 int j;
3004                 struct amdgpu_ip_block *block;
3005
3006                 for (j = 0; j < adev->num_ip_blocks; j++) {
3007                         block = &adev->ip_blocks[j];
3008
3009                         if (block->version->type != ip_order[i] ||
3010                                 !block->status.valid ||
3011                                 block->status.hw)
3012                                 continue;
3013
3014                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3015                                 r = block->version->funcs->resume(adev);
3016                         else
3017                                 r = block->version->funcs->hw_init(adev);
3018
3019                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3020                         if (r)
3021                                 return r;
3022                         block->status.hw = true;
3023                 }
3024         }
3025
3026         return 0;
3027 }
3028
3029 /**
3030  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs (phase 1)
3031  *
3032  * @adev: amdgpu_device pointer
3033  *
3034  * First resume function for hardware IPs.  The list of all the hardware
3035  * IPs that make up the asic is walked and the resume callbacks are run for
3036  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3037  * after a suspend and updates the software state as necessary.  This
3038  * function is also used for restoring the GPU after a GPU reset.
3039  * Returns 0 on success, negative error code on failure.
3040  */
3041 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3042 {
3043         int i, r;
3044
3045         for (i = 0; i < adev->num_ip_blocks; i++) {
3046                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3047                         continue;
3048                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3049                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3050                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
3051
3052                         r = adev->ip_blocks[i].version->funcs->resume(adev);
3053                         if (r) {
3054                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
3055                                           adev->ip_blocks[i].version->funcs->name, r);
3056                                 return r;
3057                         }
3058                         adev->ip_blocks[i].status.hw = true;
3059                 }
3060         }
3061
3062         return 0;
3063 }
3064
3065 /**
3066  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs (phase 2)
3067  *
3068  * @adev: amdgpu_device pointer
3069  *
3070  * Second resume function for hardware IPs.  The list of all the hardware
3071  * IPs that make up the asic is walked and the resume callbacks are run for
3072  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3073  * functional state after a suspend and updates the software state as
3074  * necessary.  This function is also used for restoring the GPU after a GPU
3075  * reset.
3076  * Returns 0 on success, negative error code on failure.
3077  */
3078 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3079 {
3080         int i, r;
3081
3082         for (i = 0; i < adev->num_ip_blocks; i++) {
3083                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3084                         continue;
3085                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3086                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3087                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3088                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3089                         continue;
3090                 r = adev->ip_blocks[i].version->funcs->resume(adev);
3091                 if (r) {
3092                         DRM_ERROR("resume of IP block <%s> failed %d\n",
3093                                   adev->ip_blocks[i].version->funcs->name, r);
3094                         return r;
3095                 }
3096                 adev->ip_blocks[i].status.hw = true;
3097         }
3098
3099         return 0;
3100 }
3101
3102 /**
3103  * amdgpu_device_ip_resume - run resume for hardware IPs
3104  *
3105  * @adev: amdgpu_device pointer
3106  *
3107  * Main resume function for hardware IPs.  The hardware IPs
3108  * are split into two resume functions because they are also used
3109  * in recovering from a GPU reset and some additional steps need to
3110  * be taken between them.  In this case (S3/S4) they are
3111  * run sequentially.
3112  * Returns 0 on success, negative error code on failure.
3113  */
3114 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3115 {
3116         int r;
3117
3118         r = amdgpu_amdkfd_resume_iommu(adev);
3119         if (r)
3120                 return r;
3121
3122         r = amdgpu_device_ip_resume_phase1(adev);
3123         if (r)
3124                 return r;
3125
3126         r = amdgpu_device_fw_loading(adev);
3127         if (r)
3128                 return r;
3129
3130         r = amdgpu_device_ip_resume_phase2(adev);
3131
3132         return r;
3133 }
3134
3135 /**
3136  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3137  *
3138  * @adev: amdgpu_device pointer
3139  *
3140  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3141  */
3142 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3143 {
3144         if (amdgpu_sriov_vf(adev)) {
3145                 if (adev->is_atom_fw) {
3146                         if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3147                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3148                 } else {
3149                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3150                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3151                 }
3152
3153                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3154                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3155         }
3156 }
3157
3158 /**
3159  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3160  *
3161  * @asic_type: AMD asic type
3162  *
3163  * Check if there is DC (new modesetting infrastructure) support for an asic.
3164  * Returns true if DC has support, false if not.
3165  */
3166 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3167 {
3168         switch (asic_type) {
3169 #if defined(CONFIG_DRM_AMD_DC)
3170         case CHIP_TAHITI:
3171         case CHIP_PITCAIRN:
3172         case CHIP_VERDE:
3173         case CHIP_OLAND:
3174                 /*
3175                  * We have systems in the wild with these ASICs that require
3176                  * LVDS and VGA support which is not supported with DC.
3177                  *
3178                  * Fall back to the non-DC driver here by default so as not to
3179                  * cause regressions.
3180                  */
3181 #if defined(CONFIG_DRM_AMD_DC_SI)
3182                 return amdgpu_dc > 0;
3183 #else
3184                 return false;
3185 #endif
3186         case CHIP_BONAIRE:
3187         case CHIP_KAVERI:
3188         case CHIP_KABINI:
3189         case CHIP_MULLINS:
3190                 /*
3191                  * We have systems in the wild with these ASICs that require
3192                  * LVDS and VGA support which is not supported with DC.
3193                  *
3194                  * Fall back to the non-DC driver here by default so as not to
3195                  * cause regressions.
3196                  */
3197                 return amdgpu_dc > 0;
3198         case CHIP_HAWAII:
3199         case CHIP_CARRIZO:
3200         case CHIP_STONEY:
3201         case CHIP_POLARIS10:
3202         case CHIP_POLARIS11:
3203         case CHIP_POLARIS12:
3204         case CHIP_VEGAM:
3205         case CHIP_TONGA:
3206         case CHIP_FIJI:
3207         case CHIP_VEGA10:
3208         case CHIP_VEGA12:
3209         case CHIP_VEGA20:
3210 #if defined(CONFIG_DRM_AMD_DC_DCN)
3211         case CHIP_RAVEN:
3212         case CHIP_NAVI10:
3213         case CHIP_NAVI14:
3214         case CHIP_NAVI12:
3215         case CHIP_RENOIR:
3216         case CHIP_CYAN_SKILLFISH:
3217         case CHIP_SIENNA_CICHLID:
3218         case CHIP_NAVY_FLOUNDER:
3219         case CHIP_DIMGREY_CAVEFISH:
3220         case CHIP_BEIGE_GOBY:
3221         case CHIP_VANGOGH:
3222         case CHIP_YELLOW_CARP:
3223 #endif
3224         default:
3225                 return amdgpu_dc != 0;
3226 #else
3227         default:
3228                 if (amdgpu_dc > 0)
3229                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3230                                          "but isn't supported by ASIC, ignoring\n");
3231                 return false;
3232 #endif
3233         }
3234 }
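
/*
 * Usage note, inferred from the amdgpu_dc checks above rather than from
 * the full module parameter documentation: booting with
 *
 *     amdgpu.dc=1
 *
 * opts the SI/CIK parts listed above into DC despite the LVDS/VGA caveat,
 * while amdgpu.dc=0 forces the non-DC path on ASICs that default to DC.
 */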
3235
3236 /**
3237  * amdgpu_device_has_dc_support - check if dc is supported
3238  *
3239  * @adev: amdgpu_device pointer
3240  *
3241  * Returns true for supported, false for not supported
3242  */
3243 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3244 {
3245         if (amdgpu_sriov_vf(adev) ||
3246             adev->enable_virtual_display ||
3247             (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3248                 return false;
3249
3250         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3251 }
3252
3253 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3254 {
3255         struct amdgpu_device *adev =
3256                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3257         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3258
3259         /* It's a bug to not have a hive within this function */
3260         if (WARN_ON(!hive))
3261                 return;
3262
3263         /*
3264          * Use task barrier to synchronize all xgmi reset works across the
3265          * hive. task_barrier_enter and task_barrier_exit will block
3266          * until all the threads running the xgmi reset works reach
3267          * those points. task_barrier_full will do both blocks.
3268          */
3269         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3270
3271                 task_barrier_enter(&hive->tb);
3272                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3273
3274                 if (adev->asic_reset_res)
3275                         goto fail;
3276
3277                 task_barrier_exit(&hive->tb);
3278                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3279
3280                 if (adev->asic_reset_res)
3281                         goto fail;
3282
3283                 if (adev->mmhub.ras_funcs &&
3284                     adev->mmhub.ras_funcs->reset_ras_error_count)
3285                         adev->mmhub.ras_funcs->reset_ras_error_count(adev);
3286         } else {
3287
3288                 task_barrier_full(&hive->tb);
3289                 adev->asic_reset_res = amdgpu_asic_reset(adev);
3290         }
3291
3292 fail:
3293         if (adev->asic_reset_res)
3294                 DRM_WARN("ASIC reset failed with error %d for drm dev %s",
3295                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3296         amdgpu_put_xgmi_hive(hive);
3297 }
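
/*
 * A minimal sketch of the task barrier semantics relied on above (see
 * <drm/task_barrier.h>); stage_one/stage_two are placeholder names:
 *
 *     task_barrier_enter(&hive->tb);  // block until all hive works arrive
 *     stage_one();                    // e.g. BACO enter on every node
 *     task_barrier_exit(&hive->tb);   // block again before continuing
 *     stage_two();                    // e.g. BACO exit
 *
 * task_barrier_full() is equivalent to an enter immediately followed by
 * an exit with no work in between.
 */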
3298
3299 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3300 {
3301         char *input = amdgpu_lockup_timeout;
3302         char *timeout_setting = NULL;
3303         int index = 0;
3304         long timeout;
3305         int ret = 0;
3306
3307         /*
3308          * By default the timeout for non-compute jobs is 10000
3309          * and 60000 for compute jobs.
3310          * In SR-IOV mode the compute timeout defaults to 60000 in
3311          * one-VF mode and to 10000 otherwise.
3312          */
3313         adev->gfx_timeout = msecs_to_jiffies(10000);
3314         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3315         if (amdgpu_sriov_vf(adev))
3316                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3317                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3318         else
3319                 adev->compute_timeout = msecs_to_jiffies(60000);
3320
3321         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3322                 while ((timeout_setting = strsep(&input, ",")) &&
3323                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3324                         ret = kstrtol(timeout_setting, 0, &timeout);
3325                         if (ret)
3326                                 return ret;
3327
3328                         if (timeout == 0) {
3329                                 index++;
3330                                 continue;
3331                         } else if (timeout < 0) {
3332                                 timeout = MAX_SCHEDULE_TIMEOUT;
3333                                 dev_warn(adev->dev, "lockup timeout disabled");
3334                                 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3335                         } else {
3336                                 timeout = msecs_to_jiffies(timeout);
3337                         }
3338
3339                         switch (index++) {
3340                         case 0:
3341                                 adev->gfx_timeout = timeout;
3342                                 break;
3343                         case 1:
3344                                 adev->compute_timeout = timeout;
3345                                 break;
3346                         case 2:
3347                                 adev->sdma_timeout = timeout;
3348                                 break;
3349                         case 3:
3350                                 adev->video_timeout = timeout;
3351                                 break;
3352                         default:
3353                                 break;
3354                         }
3355                 }
3356                 /*
3357                  * There is only one value specified and
3358                  * it should apply to all non-compute jobs.
3359                  */
3360                 if (index == 1) {
3361                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3362                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3363                                 adev->compute_timeout = adev->gfx_timeout;
3364                 }
3365         }
3366
3367         return ret;
3368 }
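
/*
 * Example, assuming the documented amdgpu.lockup_timeout format: the
 * kernel command line entry
 *
 *     amdgpu.lockup_timeout=10000,60000,10000,10000
 *
 * maps, per the switch above, to the gfx, compute, sdma and video
 * timeouts in milliseconds.  A single value applies to all non-compute
 * jobs (and to compute as well under SR-IOV/passthrough), 0 keeps the
 * default, and a negative value disables the timeout entirely.
 */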
3369
3370 static const struct attribute *amdgpu_dev_attributes[] = {
3371         &dev_attr_product_name.attr,
3372         &dev_attr_product_number.attr,
3373         &dev_attr_serial_number.attr,
3374         &dev_attr_pcie_replay_count.attr,
3375         NULL
3376 };
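
/*
 * These attributes appear under the PCI device's sysfs directory; on a
 * typical single-GPU system (paths shown as examples, not guaranteed):
 *
 *     /sys/class/drm/card0/device/product_name
 *     /sys/class/drm/card0/device/serial_number
 *     /sys/class/drm/card0/device/pcie_replay_count
 */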
3377
3378 /**
3379  * amdgpu_device_init - initialize the driver
3380  *
3381  * @adev: amdgpu_device pointer
3382  * @flags: driver flags
3383  *
3384  * Initializes the driver info and hw (all asics).
3385  * Returns 0 for success or an error on failure.
3386  * Called at driver startup.
3387  */
3388 int amdgpu_device_init(struct amdgpu_device *adev,
3389                        uint32_t flags)
3390 {
3391         struct drm_device *ddev = adev_to_drm(adev);
3392         struct pci_dev *pdev = adev->pdev;
3393         int r, i;
3394         bool px = false;
3395         u32 max_MBps;
3396
3397         adev->shutdown = false;
3398         adev->flags = flags;
3399
3400         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3401                 adev->asic_type = amdgpu_force_asic_type;
3402         else
3403                 adev->asic_type = flags & AMD_ASIC_MASK;
3404
3405         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3406         if (amdgpu_emu_mode == 1)
3407                 adev->usec_timeout *= 10;
3408         adev->gmc.gart_size = 512 * 1024 * 1024;
3409         adev->accel_working = false;
3410         adev->num_rings = 0;
3411         adev->mman.buffer_funcs = NULL;
3412         adev->mman.buffer_funcs_ring = NULL;
3413         adev->vm_manager.vm_pte_funcs = NULL;
3414         adev->vm_manager.vm_pte_num_scheds = 0;
3415         adev->gmc.gmc_funcs = NULL;
3416         adev->harvest_ip_mask = 0x0;
3417         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3418         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3419
3420         adev->smc_rreg = &amdgpu_invalid_rreg;
3421         adev->smc_wreg = &amdgpu_invalid_wreg;
3422         adev->pcie_rreg = &amdgpu_invalid_rreg;
3423         adev->pcie_wreg = &amdgpu_invalid_wreg;
3424         adev->pciep_rreg = &amdgpu_invalid_rreg;
3425         adev->pciep_wreg = &amdgpu_invalid_wreg;
3426         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3427         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3428         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3429         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3430         adev->didt_rreg = &amdgpu_invalid_rreg;
3431         adev->didt_wreg = &amdgpu_invalid_wreg;
3432         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3433         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3434         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3435         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3436
3437         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3438                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3439                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3440
3441         /* mutex initializations are all done here so we
3442          * can call these functions without locking issues */
3443         mutex_init(&adev->firmware.mutex);
3444         mutex_init(&adev->pm.mutex);
3445         mutex_init(&adev->gfx.gpu_clock_mutex);
3446         mutex_init(&adev->srbm_mutex);
3447         mutex_init(&adev->gfx.pipe_reserve_mutex);
3448         mutex_init(&adev->gfx.gfx_off_mutex);
3449         mutex_init(&adev->grbm_idx_mutex);
3450         mutex_init(&adev->mn_lock);
3451         mutex_init(&adev->virt.vf_errors.lock);
3452         hash_init(adev->mn_hash);
3453         atomic_set(&adev->in_gpu_reset, 0);
3454         init_rwsem(&adev->reset_sem);
3455         mutex_init(&adev->psp.mutex);
3456         mutex_init(&adev->notifier_lock);
3457
3458         r = amdgpu_device_init_apu_flags(adev);
3459         if (r)
3460                 return r;
3461
3462         r = amdgpu_device_check_arguments(adev);
3463         if (r)
3464                 return r;
3465
3466         spin_lock_init(&adev->mmio_idx_lock);
3467         spin_lock_init(&adev->smc_idx_lock);
3468         spin_lock_init(&adev->pcie_idx_lock);
3469         spin_lock_init(&adev->uvd_ctx_idx_lock);
3470         spin_lock_init(&adev->didt_idx_lock);
3471         spin_lock_init(&adev->gc_cac_idx_lock);
3472         spin_lock_init(&adev->se_cac_idx_lock);
3473         spin_lock_init(&adev->audio_endpt_idx_lock);
3474         spin_lock_init(&adev->mm_stats.lock);
3475
3476         INIT_LIST_HEAD(&adev->shadow_list);
3477         mutex_init(&adev->shadow_list_lock);
3478
3479         INIT_LIST_HEAD(&adev->reset_list);
3480
3481         INIT_DELAYED_WORK(&adev->delayed_init_work,
3482                           amdgpu_device_delayed_init_work_handler);
3483         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3484                           amdgpu_device_delay_enable_gfx_off);
3485
3486         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3487
3488         adev->gfx.gfx_off_req_count = 1;
3489         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3490
3491         atomic_set(&adev->throttling_logging_enabled, 1);
3492         /*
3493          * If throttling continues, logging will be performed every minute
3494          * to avoid log flooding. "-1" is subtracted since the thermal
3495          * throttling interrupt comes every second. Thus, the total logging
3496          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3497          * for throttling interrupt) = 60 seconds.
3498          */
3499         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3500         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3501
3502         /* Registers mapping */
3503         /* TODO: block userspace mapping of io register */
3504         if (adev->asic_type >= CHIP_BONAIRE) {
3505                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3506                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3507         } else {
3508                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3509                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3510         }
3511
3512         for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3513                 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3514
3515         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3516         if (!adev->rmmio)
3517                 return -ENOMEM;
3519         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3520         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3521
3522         amdgpu_device_get_pcie_info(adev);
3523
3524         if (amdgpu_mcbp)
3525                 DRM_INFO("MCBP is enabled\n");
3526
3527         if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3528                 adev->enable_mes = true;
3529
3530         /* detect hw virtualization here */
3531         amdgpu_detect_virtualization(adev);
3532
3533         r = amdgpu_device_get_job_timeout_settings(adev);
3534         if (r) {
3535                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3536                 return r;
3537         }
3538
3539         /* early init functions */
3540         r = amdgpu_device_ip_early_init(adev);
3541         if (r)
3542                 return r;
3543
3544         /* enable PCIE atomic ops */
3545         if (amdgpu_sriov_vf(adev))
3546                 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3547                         adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_enabled_flags ==
3548                         (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3549         else
3550                 adev->have_atomics_support =
3551                         !pci_enable_atomic_ops_to_root(adev->pdev,
3552                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3553                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3554         if (!adev->have_atomics_support)
3555                 dev_info(adev->dev, "PCIE atomic ops are not supported\n");
3556
3557         /* doorbell bar mapping and doorbell index init */
3558         amdgpu_device_doorbell_init(adev);
3559
3560         if (amdgpu_emu_mode == 1) {
3561                 /* post the asic on emulation mode */
3562                 emu_soc_asic_init(adev);
3563                 goto fence_driver_init;
3564         }
3565
3566         amdgpu_reset_init(adev);
3567
3568         /* detect if we are running with an SR-IOV VBIOS */
3569         amdgpu_device_detect_sriov_bios(adev);
3570
3571         /* check if we need to reset the asic
3572          *  E.g., driver was not cleanly unloaded previously, etc.
3573          */
3574         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3575                 if (adev->gmc.xgmi.num_physical_nodes) {
3576                         dev_info(adev->dev, "Pending hive reset.\n");
3577                         adev->gmc.xgmi.pending_reset = true;
3578                         /* Only need to init the blocks necessary for SMU to handle the reset */
3579                         for (i = 0; i < adev->num_ip_blocks; i++) {
3580                                 if (!adev->ip_blocks[i].status.valid)
3581                                         continue;
3582                                 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3583                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3584                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3585                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3586                                         DRM_DEBUG("IP %s disabled for hw_init.\n",
3587                                                 adev->ip_blocks[i].version->funcs->name);
3588                                         adev->ip_blocks[i].status.hw = true;
3589                                 }
3590                         }
3591                 } else {
3592                         r = amdgpu_asic_reset(adev);
3593                         if (r) {
3594                                 dev_err(adev->dev, "asic reset on init failed\n");
3595                                 goto failed;
3596                         }
3597                 }
3598         }
3599
3600         pci_enable_pcie_error_reporting(adev->pdev);
3601
3602         /* Post card if necessary */
3603         if (amdgpu_device_need_post(adev)) {
3604                 if (!adev->bios) {
3605                         dev_err(adev->dev, "no vBIOS found\n");
3606                         r = -EINVAL;
3607                         goto failed;
3608                 }
3609                 DRM_INFO("GPU posting now...\n");
3610                 r = amdgpu_device_asic_init(adev);
3611                 if (r) {
3612                         dev_err(adev->dev, "gpu post error!\n");
3613                         goto failed;
3614                 }
3615         }
3616
3617         if (adev->is_atom_fw) {
3618                 /* Initialize clocks */
3619                 r = amdgpu_atomfirmware_get_clock_info(adev);
3620                 if (r) {
3621                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3622                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3623                         goto failed;
3624                 }
3625         } else {
3626                 /* Initialize clocks */
3627                 r = amdgpu_atombios_get_clock_info(adev);
3628                 if (r) {
3629                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3630                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3631                         goto failed;
3632                 }
3633                 /* init i2c buses */
3634                 if (!amdgpu_device_has_dc_support(adev))
3635                         amdgpu_atombios_i2c_init(adev);
3636         }
3637
3638 fence_driver_init:
3639         /* Fence driver */
3640         r = amdgpu_fence_driver_sw_init(adev);
3641         if (r) {
3642                 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3643                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3644                 goto failed;
3645         }
3646
3647         /* init the mode config */
3648         drm_mode_config_init(adev_to_drm(adev));
3649
3650         r = amdgpu_device_ip_init(adev);
3651         if (r) {
3652                 /* failed in exclusive mode due to timeout */
3653                 if (amdgpu_sriov_vf(adev) &&
3654                     !amdgpu_sriov_runtime(adev) &&
3655                     amdgpu_virt_mmio_blocked(adev) &&
3656                     !amdgpu_virt_wait_reset(adev)) {
3657                         dev_err(adev->dev, "VF exclusive mode timeout\n");
3658                         /* Don't send request since VF is inactive. */
3659                         adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3660                         adev->virt.ops = NULL;
3661                         r = -EAGAIN;
3662                         goto release_ras_con;
3663                 }
3664                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3665                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3666                 goto release_ras_con;
3667         }
3668
3669         amdgpu_fence_driver_hw_init(adev);
3670
3671         dev_info(adev->dev,
3672                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3673                         adev->gfx.config.max_shader_engines,
3674                         adev->gfx.config.max_sh_per_se,
3675                         adev->gfx.config.max_cu_per_sh,
3676                         adev->gfx.cu_info.number);
3677
3678         adev->accel_working = true;
3679
3680         amdgpu_vm_check_compute_bug(adev);
3681
3682         /* Initialize the buffer migration limit. */
3683         if (amdgpu_moverate >= 0)
3684                 max_MBps = amdgpu_moverate;
3685         else
3686                 max_MBps = 8; /* Allow 8 MB/s. */
3687         /* Get a log2 for easy divisions. */
3688         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3689
3690         amdgpu_fbdev_init(adev);
3691
3692         r = amdgpu_pm_sysfs_init(adev);
3693         if (r) {
3694                 adev->pm_sysfs_en = false;
3695                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3696         } else
3697                 adev->pm_sysfs_en = true;
3698
3699         r = amdgpu_ucode_sysfs_init(adev);
3700         if (r) {
3701                 adev->ucode_sysfs_en = false;
3702                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3703         } else
3704                 adev->ucode_sysfs_en = true;
3705
3706         if (amdgpu_testing & 1) {
3707                 if (adev->accel_working)
3708                         amdgpu_test_moves(adev);
3709                 else
3710                         DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3711         }
3712         if (amdgpu_benchmarking) {
3713                 if (adev->accel_working)
3714                         amdgpu_benchmark(adev, amdgpu_benchmarking);
3715                 else
3716                         DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3717         }
3718
3719         /*
3720          * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3721          * Otherwise the mgpu fan boost feature will be skipped because
3722          * the gpu instance count would be too low.
3723          */
3724         amdgpu_register_gpu_instance(adev);
3725
3726         /* enable clockgating, etc. after ib tests, etc. since some blocks require
3727          * explicit gating rather than handling it automatically.
3728          */
3729         if (!adev->gmc.xgmi.pending_reset) {
3730                 r = amdgpu_device_ip_late_init(adev);
3731                 if (r) {
3732                         dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3733                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3734                         goto release_ras_con;
3735                 }
3736                 /* must succeed. */
3737                 amdgpu_ras_resume(adev);
3738                 queue_delayed_work(system_wq, &adev->delayed_init_work,
3739                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3740         }
3741
3742         if (amdgpu_sriov_vf(adev))
3743                 flush_delayed_work(&adev->delayed_init_work);
3744
3745         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3746         if (r)
3747                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3748
3749         if (IS_ENABLED(CONFIG_PERF_EVENTS))
3750                 r = amdgpu_pmu_init(adev);
3751         if (r)
3752                 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3753
3754         /* Keep the stored PCI config space at hand for restore after a sudden PCI error */
3755         if (amdgpu_device_cache_pci_state(adev->pdev))
3756                 pci_restore_state(pdev);
3757
3758         /* if we have more than one VGA card, then disable the amdgpu VGA resources */
3759         /* this will fail for cards that aren't VGA class devices, just
3760          * ignore it */
3761         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3762                 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3763
3764         if (amdgpu_device_supports_px(ddev)) {
3765                 px = true;
3766                 vga_switcheroo_register_client(adev->pdev,
3767                                                &amdgpu_switcheroo_ops, px);
3768                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3769         }
3770
3771         if (adev->gmc.xgmi.pending_reset)
3772                 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3773                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3774
3775         return 0;
3776
3777 release_ras_con:
3778         amdgpu_release_ras_context(adev);
3779
3780 failed:
3781         amdgpu_vf_error_trans_all(adev);
3782
3783         return r;
3784 }
3785
3786 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3787 {
3788         /* Clear all CPU mappings pointing to this device */
3789         unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3790
3791         /* Unmap all mapped bars - Doorbell, registers and VRAM */
3792         amdgpu_device_doorbell_fini(adev);
3793
3794         iounmap(adev->rmmio);
3795         adev->rmmio = NULL;
3796         if (adev->mman.aper_base_kaddr)
3797                 iounmap(adev->mman.aper_base_kaddr);
3798         adev->mman.aper_base_kaddr = NULL;
3799
3800         /* Memory manager related */
3801         if (!adev->gmc.xgmi.connected_to_cpu) {
3802                 arch_phys_wc_del(adev->gmc.vram_mtrr);
3803                 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3804         }
3805 }
3806
3807 /**
3808  * amdgpu_device_fini_hw - tear down the driver
3809  *
3810  * @adev: amdgpu_device pointer
3811  *
3812  * Tear down the hardware side of the driver (all asics); the software
3813  * state is torn down separately in amdgpu_device_fini_sw().
3814  * Called at driver shutdown.
3814  */
3815 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3816 {
3817         dev_info(adev->dev, "amdgpu: finishing device.\n");
3818         flush_delayed_work(&adev->delayed_init_work);
3819         if (adev->mman.initialized) {
3820                 flush_delayed_work(&adev->mman.bdev.wq);
3821                 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3822         }
3823         adev->shutdown = true;
3824
3825         /* make sure IB tests have finished before entering exclusive mode
3826          * to avoid preemption on IB tests
3827          */
3828         if (amdgpu_sriov_vf(adev)) {
3829                 amdgpu_virt_request_full_gpu(adev, false);
3830                 amdgpu_virt_fini_data_exchange(adev);
3831         }
3832
3833         /* disable all interrupts */
3834         amdgpu_irq_disable_all(adev);
3835         if (adev->mode_info.mode_config_initialized) {
3836                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3837                         drm_helper_force_disable_all(adev_to_drm(adev));
3838                 else
3839                         drm_atomic_helper_shutdown(adev_to_drm(adev));
3840         }
3841         amdgpu_fence_driver_hw_fini(adev);
3842
3843         if (adev->pm_sysfs_en)
3844                 amdgpu_pm_sysfs_fini(adev);
3845         if (adev->ucode_sysfs_en)
3846                 amdgpu_ucode_sysfs_fini(adev);
3847         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3848
3849         amdgpu_fbdev_fini(adev);
3850
3851         amdgpu_device_ip_fini_early(adev);
3852
3853         amdgpu_irq_fini_hw(adev);
3854
3855         ttm_device_clear_dma_mappings(&adev->mman.bdev);
3856
3857         amdgpu_gart_dummy_page_fini(adev);
3858
3859         amdgpu_device_unmap_mmio(adev);
3860 }
3861
3862 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3863 {
3864         amdgpu_fence_driver_sw_fini(adev);
3865         amdgpu_device_ip_fini(adev);
3866         release_firmware(adev->firmware.gpu_info_fw);
3867         adev->firmware.gpu_info_fw = NULL;
3868         adev->accel_working = false;
3869
3870         amdgpu_reset_fini(adev);
3871
3872         /* free i2c buses */
3873         if (!amdgpu_device_has_dc_support(adev))
3874                 amdgpu_i2c_fini(adev);
3875
3876         if (amdgpu_emu_mode != 1)
3877                 amdgpu_atombios_fini(adev);
3878
3879         kfree(adev->bios);
3880         adev->bios = NULL;
3881         if (amdgpu_device_supports_px(adev_to_drm(adev))) {
3882                 vga_switcheroo_unregister_client(adev->pdev);
3883                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3884         }
3885         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3886                 vga_client_unregister(adev->pdev);
3887
3888         if (IS_ENABLED(CONFIG_PERF_EVENTS))
3889                 amdgpu_pmu_fini(adev);
3890         if (adev->mman.discovery_bin)
3891                 amdgpu_discovery_fini(adev);
3892
3893         kfree(adev->pci_state);
3894
3895 }
3896
3897 /**
3898  * amdgpu_device_evict_resources - evict device resources
3899  * @adev: amdgpu device object
3900  *
3901  * Evicts all ttm device resources (vram BOs, gart table) from the lru list
3902  * of the vram memory type. Mainly used for evicting device resources
3903  * at suspend time.
3904  *
3905  */
3906 static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
3907 {
3908         /* No need to evict vram on APUs for suspend to ram */
3909         if (adev->in_s3 && (adev->flags & AMD_IS_APU))
3910                 return;
3911
3912         if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
3913                 DRM_WARN("evicting device resources failed\n");
3914
3915 }
3916
3917 /*
3918  * Suspend & resume.
3919  */
3920 /**
3921  * amdgpu_device_suspend - initiate device suspend
3922  *
3923  * @dev: drm dev pointer
3924  * @fbcon: notify the fbdev of suspend
3925  *
3926  * Puts the hw in the suspend state (all asics).
3927  * Returns 0 for success or an error on failure.
3928  * Called at driver suspend.
3929  */
3930 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3931 {
3932         struct amdgpu_device *adev = drm_to_adev(dev);
3933
3934         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3935                 return 0;
3936
3937         adev->in_suspend = true;
3938
3939         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
3940                 DRM_WARN("smart shift update failed\n");
3941
3942         drm_kms_helper_poll_disable(dev);
3943
3944         if (fbcon)
3945                 amdgpu_fbdev_set_suspend(adev, 1);
3946
3947         cancel_delayed_work_sync(&adev->delayed_init_work);
3948
3949         amdgpu_ras_suspend(adev);
3950
3951         amdgpu_device_ip_suspend_phase1(adev);
3952
3953         if (!adev->in_s0ix)
3954                 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
3955
3956         /* First evict vram memory */
3957         amdgpu_device_evict_resources(adev);
3958
3959         amdgpu_fence_driver_hw_fini(adev);
3960
3961         amdgpu_device_ip_suspend_phase2(adev);
3962         /* This second call to evict device resources is to evict
3963          * the gart page table using the CPU.
3964          */
3965         amdgpu_device_evict_resources(adev);
3966
3967         return 0;
3968 }
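
/*
 * Sketch of a typical caller, simplified from the PM callbacks in
 * amdgpu_drv.c (the real handler also records S0ix/S3 state first):
 *
 *     static int amdgpu_pmops_suspend(struct device *dev)
 *     {
 *             struct drm_device *drm_dev = dev_get_drvdata(dev);
 *
 *             return amdgpu_device_suspend(drm_dev, true);
 *     }
 */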
3969
3970 /**
3971  * amdgpu_device_resume - initiate device resume
3972  *
3973  * @dev: drm dev pointer
3974  * @fbcon: notify the fbdev of resume
3975  *
3976  * Bring the hw back to operating state (all asics).
3977  * Returns 0 for success or an error on failure.
3978  * Called at driver resume.
3979  */
3980 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3981 {
3982         struct amdgpu_device *adev = drm_to_adev(dev);
3983         int r = 0;
3984
3985         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3986                 return 0;
3987
3988         if (adev->in_s0ix)
3989                 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
3990
3991         /* post card */
3992         if (amdgpu_device_need_post(adev)) {
3993                 r = amdgpu_device_asic_init(adev);
3994                 if (r)
3995                         dev_err(adev->dev, "amdgpu asic init failed\n");
3996         }
3997
3998         r = amdgpu_device_ip_resume(adev);
3999         if (r) {
4000                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4001                 return r;
4002         }
4003         amdgpu_fence_driver_hw_init(adev);
4004
4005         r = amdgpu_device_ip_late_init(adev);
4006         if (r)
4007                 return r;
4008
4009         queue_delayed_work(system_wq, &adev->delayed_init_work,
4010                            msecs_to_jiffies(AMDGPU_RESUME_MS));
4011
4012         if (!adev->in_s0ix) {
4013                 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4014                 if (r)
4015                         return r;
4016         }
4017
4018         /* Make sure IB tests flushed */
4019         flush_delayed_work(&adev->delayed_init_work);
4020
4021         if (fbcon)
4022                 amdgpu_fbdev_set_suspend(adev, 0);
4023
4024         drm_kms_helper_poll_enable(dev);
4025
4026         amdgpu_ras_resume(adev);
4027
4028         /*
4029          * Most of the connector probing functions try to acquire runtime pm
4030          * refs to ensure that the GPU is powered on when connector polling is
4031          * performed. Since we're calling this from a runtime PM callback,
4032          * trying to acquire rpm refs will cause us to deadlock.
4033          *
4034          * Since we're guaranteed to be holding the rpm lock, it's safe to
4035          * temporarily disable the rpm helpers so this doesn't deadlock us.
4036          */
4037 #ifdef CONFIG_PM
4038         dev->dev->power.disable_depth++;
4039 #endif
4040         if (!amdgpu_device_has_dc_support(adev))
4041                 drm_helper_hpd_irq_event(dev);
4042         else
4043                 drm_kms_helper_hotplug_event(dev);
4044 #ifdef CONFIG_PM
4045         dev->dev->power.disable_depth--;
4046 #endif
4047         adev->in_suspend = false;
4048
4049         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4050                 DRM_WARN("smart shift update failed\n");
4051
4052         return 0;
4053 }
4054
4055 /**
4056  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4057  *
4058  * @adev: amdgpu_device pointer
4059  *
4060  * The list of all the hardware IPs that make up the asic is walked and
4061  * the check_soft_reset callbacks are run.  check_soft_reset determines
4062  * if the asic is still hung or not.
4063  * Returns true if any of the IPs are still in a hung state, false if not.
4064  */
4065 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4066 {
4067         int i;
4068         bool asic_hang = false;
4069
4070         if (amdgpu_sriov_vf(adev))
4071                 return true;
4072
4073         if (amdgpu_asic_need_full_reset(adev))
4074                 return true;
4075
4076         for (i = 0; i < adev->num_ip_blocks; i++) {
4077                 if (!adev->ip_blocks[i].status.valid)
4078                         continue;
4079                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4080                         adev->ip_blocks[i].status.hang =
4081                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4082                 if (adev->ip_blocks[i].status.hang) {
4083                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4084                         asic_hang = true;
4085                 }
4086         }
4087         return asic_hang;
4088 }
4089
4090 /**
4091  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4092  *
4093  * @adev: amdgpu_device pointer
4094  *
4095  * The list of all the hardware IPs that make up the asic is walked and the
4096  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4097  * handles any IP specific hardware or software state changes that are
4098  * necessary for a soft reset to succeed.
4099  * Returns 0 on success, negative error code on failure.
4100  */
4101 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4102 {
4103         int i, r = 0;
4104
4105         for (i = 0; i < adev->num_ip_blocks; i++) {
4106                 if (!adev->ip_blocks[i].status.valid)
4107                         continue;
4108                 if (adev->ip_blocks[i].status.hang &&
4109                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4110                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4111                         if (r)
4112                                 return r;
4113                 }
4114         }
4115
4116         return 0;
4117 }
4118
4119 /**
4120  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4121  *
4122  * @adev: amdgpu_device pointer
4123  *
4124  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4125  * reset is necessary to recover.
4126  * Returns true if a full asic reset is required, false if not.
4127  */
4128 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4129 {
4130         int i;
4131
4132         if (amdgpu_asic_need_full_reset(adev))
4133                 return true;
4134
4135         for (i = 0; i < adev->num_ip_blocks; i++) {
4136                 if (!adev->ip_blocks[i].status.valid)
4137                         continue;
4138                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4139                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4140                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4141                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4142                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4143                         if (adev->ip_blocks[i].status.hang) {
4144                                 dev_info(adev->dev, "Some blocks need a full reset!\n");
4145                                 return true;
4146                         }
4147                 }
4148         }
4149         return false;
4150 }
4151
4152 /**
4153  * amdgpu_device_ip_soft_reset - do a soft reset
4154  *
4155  * @adev: amdgpu_device pointer
4156  *
4157  * The list of all the hardware IPs that make up the asic is walked and the
4158  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4159  * IP specific hardware or software state changes that are necessary to soft
4160  * reset the IP.
4161  * Returns 0 on success, negative error code on failure.
4162  */
4163 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4164 {
4165         int i, r = 0;
4166
4167         for (i = 0; i < adev->num_ip_blocks; i++) {
4168                 if (!adev->ip_blocks[i].status.valid)
4169                         continue;
4170                 if (adev->ip_blocks[i].status.hang &&
4171                     adev->ip_blocks[i].version->funcs->soft_reset) {
4172                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4173                         if (r)
4174                                 return r;
4175                 }
4176         }
4177
4178         return 0;
4179 }
4180
4181 /**
4182  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4183  *
4184  * @adev: amdgpu_device pointer
4185  *
4186  * The list of all the hardware IPs that make up the asic is walked and the
4187  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4188  * handles any IP specific hardware or software state changes that are
4189  * necessary after the IP has been soft reset.
4190  * Returns 0 on success, negative error code on failure.
4191  */
4192 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4193 {
4194         int i, r = 0;
4195
4196         for (i = 0; i < adev->num_ip_blocks; i++) {
4197                 if (!adev->ip_blocks[i].status.valid)
4198                         continue;
4199                 if (adev->ip_blocks[i].status.hang &&
4200                     adev->ip_blocks[i].version->funcs->post_soft_reset)
4201                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4202                 if (r)
4203                         return r;
4204         }
4205
4206         return 0;
4207 }
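
/*
 * A hedged sketch of how the soft reset helpers above are assumed to fit
 * together; the escalation logic itself lives in the reset path elsewhere
 * in the driver:
 *
 *     if (!amdgpu_device_ip_need_full_reset(adev)) {
 *             amdgpu_device_ip_pre_soft_reset(adev);
 *             amdgpu_device_ip_soft_reset(adev);
 *             amdgpu_device_ip_post_soft_reset(adev);
 *             if (amdgpu_device_ip_check_soft_reset(adev))
 *                     full_reset_needed = true;  // still hung: escalate
 *     }
 */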
4208
4209 /**
4210  * amdgpu_device_recover_vram - Recover some VRAM contents
4211  *
4212  * @adev: amdgpu_device pointer
4213  *
4214  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4215  * restore things like GPUVM page tables after a GPU reset where
4216  * the contents of VRAM might be lost.
4217  *
4218  * Returns:
4219  * 0 on success, negative error code on failure.
4220  */
4221 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4222 {
4223         struct dma_fence *fence = NULL, *next = NULL;
4224         struct amdgpu_bo *shadow;
4225         struct amdgpu_bo_vm *vmbo;
4226         long r = 1, tmo;
4227
4228         if (amdgpu_sriov_runtime(adev))
4229                 tmo = msecs_to_jiffies(8000);
4230         else
4231                 tmo = msecs_to_jiffies(100);
4232
4233         dev_info(adev->dev, "recover vram bo from shadow start\n");
4234         mutex_lock(&adev->shadow_list_lock);
4235         list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4236                 shadow = &vmbo->bo;
4237                 /* No need to recover an evicted BO */
4238                 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4239                     shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4240                     shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4241                         continue;
4242
4243                 r = amdgpu_bo_restore_shadow(shadow, &next);
4244                 if (r)
4245                         break;
4246
4247                 if (fence) {
4248                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4249                         dma_fence_put(fence);
4250                         fence = next;
4251                         if (tmo == 0) {
4252                                 r = -ETIMEDOUT;
4253                                 break;
4254                         } else if (tmo < 0) {
4255                                 r = tmo;
4256                                 break;
4257                         }
4258                 } else {
4259                         fence = next;
4260                 }
4261         }
4262         mutex_unlock(&adev->shadow_list_lock);
4263
4264         if (fence)
4265                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4266         dma_fence_put(fence);
4267
4268         if (r < 0 || tmo <= 0) {
4269                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4270                 return -EIO;
4271         }
4272
4273         dev_info(adev->dev, "recover vram bo from shadow done\n");
4274         return 0;
4275 }
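
/*
 * Background note (a simplification): VRAM page-table BOs keep a shadow
 * copy in GTT, with shadow->parent pointing back at the VRAM BO.
 * amdgpu_bo_restore_shadow() schedules a GPU copy from the shadow into
 * the parent and hands back the fence of that copy, which the loop above
 * chains from iteration to iteration and waits on with a timeout.
 */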
4276
4277
4278 /**
4279  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4280  *
4281  * @adev: amdgpu_device pointer
4282  * @from_hypervisor: request from hypervisor
4283  *
4284  * Do a VF FLR and reinitialize the ASIC.
4285  * Returns 0 on success, negative error code on failure.
4286  */
4287 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4288                                      bool from_hypervisor)
4289 {
4290         int r;
4291
4292         amdgpu_amdkfd_pre_reset(adev);
4293
4294         if (from_hypervisor)
4295                 r = amdgpu_virt_request_full_gpu(adev, true);
4296         else
4297                 r = amdgpu_virt_reset_gpu(adev);
4298         if (r)
4299                 return r;
4300
4301         /* Resume IP prior to SMC */
4302         r = amdgpu_device_ip_reinit_early_sriov(adev);
4303         if (r)
4304                 goto error;
4305
4306         amdgpu_virt_init_data_exchange(adev);
4307         /* we need to recover the GART prior to running SMC/CP/SDMA resume */
4308         amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
4309
4310         r = amdgpu_device_fw_loading(adev);
4311         if (r)
4312                 return r;
4313
4314         /* now we are okay to resume SMC/CP/SDMA */
4315         r = amdgpu_device_ip_reinit_late_sriov(adev);
4316         if (r)
4317                 goto error;
4318
4319         amdgpu_irq_gpu_reset_resume_helper(adev);
4320         r = amdgpu_ib_ring_tests(adev);
4321         amdgpu_amdkfd_post_reset(adev);
4322
4323 error:
4324         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4325                 amdgpu_inc_vram_lost(adev);
4326                 r = amdgpu_device_recover_vram(adev);
4327         }
4328         amdgpu_virt_release_full_gpu(adev, true);
4329
4330         return r;
4331 }
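
/*
 * Usage sketch (editorial): amdgpu_device_gpu_recover() below takes this
 * path on SR-IOV virtual functions, asking the hypervisor for a fresh FLR
 * only when there is no known guilty job:
 *
 *	if (amdgpu_sriov_vf(adev))
 *		r = amdgpu_device_reset_sriov(adev, job ? false : true);
 */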
4332
4333 /**
4334  * amdgpu_device_has_job_running - check if there is any job in the pending list
4335  *
4336  * @adev: amdgpu_device pointer
4337  *
4338  * Check if any scheduler ring still has a job in its pending list.
4339  */
4340 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4341 {
4342         int i;
4343         struct drm_sched_job *job;
4344
4345         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4346                 struct amdgpu_ring *ring = adev->rings[i];
4347
4348                 if (!ring || !ring->sched.thread)
4349                         continue;
4350
4351                 spin_lock(&ring->sched.job_list_lock);
4352                 job = list_first_entry_or_null(&ring->sched.pending_list,
4353                                                struct drm_sched_job, list);
4354                 spin_unlock(&ring->sched.job_list_lock);
4355                 if (job)
4356                         return true;
4357         }
4358         return false;
4359 }
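
/*
 * Usage sketch (editorial, hypothetical caller): a suspend or idle path can
 * use this check to refuse to power the device down while work is still in
 * flight:
 *
 *	if (amdgpu_device_has_job_running(adev))
 *		return -EBUSY;	// keep the device up until the rings drain
 */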
4360
4361 /**
4362  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4363  *
4364  * @adev: amdgpu_device pointer
4365  *
4366  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4367  * a hung GPU.
4368  */
4369 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4370 {
4371         if (!amdgpu_device_ip_check_soft_reset(adev)) {
4372                 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4373                 return false;
4374         }
4375
4376         if (amdgpu_gpu_recovery == 0)
4377                 goto disabled;
4378
4379         if (amdgpu_sriov_vf(adev))
4380                 return true;
4381
4382         if (amdgpu_gpu_recovery == -1) {
4383                 switch (adev->asic_type) {
4384                 case CHIP_BONAIRE:
4385                 case CHIP_HAWAII:
4386                 case CHIP_TOPAZ:
4387                 case CHIP_TONGA:
4388                 case CHIP_FIJI:
4389                 case CHIP_POLARIS10:
4390                 case CHIP_POLARIS11:
4391                 case CHIP_POLARIS12:
4392                 case CHIP_VEGAM:
4393                 case CHIP_VEGA20:
4394                 case CHIP_VEGA10:
4395                 case CHIP_VEGA12:
4396                 case CHIP_RAVEN:
4397                 case CHIP_ARCTURUS:
4398                 case CHIP_RENOIR:
4399                 case CHIP_NAVI10:
4400                 case CHIP_NAVI14:
4401                 case CHIP_NAVI12:
4402                 case CHIP_SIENNA_CICHLID:
4403                 case CHIP_NAVY_FLOUNDER:
4404                 case CHIP_DIMGREY_CAVEFISH:
4405                 case CHIP_BEIGE_GOBY:
4406                 case CHIP_VANGOGH:
4407                 case CHIP_ALDEBARAN:
4408                         break;
4409                 default:
4410                         goto disabled;
4411                 }
4412         }
4413
4414         return true;
4415
4416 disabled:
4417         dev_info(adev->dev, "GPU recovery disabled.\n");
4418         return false;
4419 }
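
/*
 * Usage sketch (editorial): the job timeout (TDR) handler gates recovery on
 * this check, so that "timeout but no hang" cases and module-parameter
 * opt-outs skip the heavyweight reset:
 *
 *	if (amdgpu_device_should_recover_gpu(ring->adev))
 *		r = amdgpu_device_gpu_recover(ring->adev, job);
 */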
4420
4421 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4422 {
4423         u32 i;
4424         int ret = 0;
4425
4426         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4427
4428         dev_info(adev->dev, "GPU mode1 reset\n");
4429
4430         /* disable BM */
4431         pci_clear_master(adev->pdev);
4432
4433         amdgpu_device_cache_pci_state(adev->pdev);
4434
4435         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4436                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4437                 ret = amdgpu_dpm_mode1_reset(adev);
4438         } else {
4439                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4440                 ret = psp_gpu_reset(adev);
4441         }
4442
4443         if (ret)
4444                 dev_err(adev->dev, "GPU mode1 reset failed\n");
4445
4446         amdgpu_device_load_pci_state(adev->pdev);
4447
4448         /* wait for asic to come out of reset */
4449         for (i = 0; i < adev->usec_timeout; i++) {
4450                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4451
4452                 if (memsize != 0xffffffff)
4453                         break;
4454                 udelay(1);
4455         }
4456
4457         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4458         return ret;
4459 }
4460
4461 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4462                                  struct amdgpu_reset_context *reset_context)
4463 {
4464         int i, j, r = 0;
4465         struct amdgpu_job *job = NULL;
4466         bool need_full_reset =
4467                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4468
4469         if (reset_context->reset_req_dev == adev)
4470                 job = reset_context->job;
4471
4472         if (amdgpu_sriov_vf(adev)) {
4473                 /* stop the data exchange thread */
4474                 amdgpu_virt_fini_data_exchange(adev);
4475         }
4476
4477         /* block all schedulers and reset given job's ring */
4478         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4479                 struct amdgpu_ring *ring = adev->rings[i];
4480
4481                 if (!ring || !ring->sched.thread)
4482                         continue;
4483
4484                 /* Clear job fences from the fence driver to avoid force_completion
4485                  * on them; keep the NULL and VM flush fences in the fence driver. */
4486                 for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
4487                         struct dma_fence *old, **ptr;
4488
4489                         ptr = &ring->fence_drv.fences[j];
4490                         old = rcu_dereference_protected(*ptr, 1);
4491                         if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
4492                                 RCU_INIT_POINTER(*ptr, NULL);
4493                         }
4494                 }
4495                 /* after all HW jobs are reset, the HW fences are meaningless; force their completion */
4496                 amdgpu_fence_driver_force_completion(ring);
4497         }
4498
4499         if (job && job->vm)
4500                 drm_sched_increase_karma(&job->base);
4501
4502         r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4503         /* If reset handler not implemented, continue; otherwise return */
4504         if (r == -ENOSYS)
4505                 r = 0;
4506         else
4507                 return r;
4508
4509         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4510         if (!amdgpu_sriov_vf(adev)) {
4511
4512                 if (!need_full_reset)
4513                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4514
4515                 if (!need_full_reset) {
4516                         amdgpu_device_ip_pre_soft_reset(adev);
4517                         r = amdgpu_device_ip_soft_reset(adev);
4518                         amdgpu_device_ip_post_soft_reset(adev);
4519                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4520                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4521                                 need_full_reset = true;
4522                         }
4523                 }
4524
4525                 if (need_full_reset)
4526                         r = amdgpu_device_ip_suspend(adev);
4527                 if (need_full_reset)
4528                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4529                 else
4530                         clear_bit(AMDGPU_NEED_FULL_RESET,
4531                                   &reset_context->flags);
4532         }
4533
4534         return r;
4535 }
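
/*
 * Editorial note: this function and amdgpu_do_asic_reset() share a dispatch
 * convention for ASIC-specific reset handlers -- -ENOSYS means "no handler
 * registered, fall through to the generic path":
 *
 *	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
 *	if (r == -ENOSYS)
 *		r = 0;		// no handler: continue with the default flow
 *	else
 *		return r;	// handler ran (0) or failed (< 0); we're done
 */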
4536
4537 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4538                          struct amdgpu_reset_context *reset_context)
4539 {
4540         struct amdgpu_device *tmp_adev = NULL;
4541         bool need_full_reset, skip_hw_reset, vram_lost = false;
4542         int r = 0;
4543
4544         /* Try reset handler method first */
4545         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4546                                     reset_list);
4547         r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4548         /* If reset handler not implemented, continue; otherwise return */
4549         if (r == -ENOSYS)
4550                 r = 0;
4551         else
4552                 return r;
4553
4554         /* Reset handler not implemented, use the default method */
4555         need_full_reset =
4556                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4557         skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4558
4559         /*
4560          * ASIC reset has to be done on all XGMI hive nodes ASAP
4561  * to allow proper link negotiation in FW (within 1 sec)
4562          */
4563         if (!skip_hw_reset && need_full_reset) {
4564                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4565                         /* For XGMI run all resets in parallel to speed up the process */
4566                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4567                                 tmp_adev->gmc.xgmi.pending_reset = false;
4568                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4569                                         r = -EALREADY;
4570                         } else
4571                                 r = amdgpu_asic_reset(tmp_adev);
4572
4573                         if (r) {
4574                                 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4575                                          r, adev_to_drm(tmp_adev)->unique);
4576                                 break;
4577                         }
4578                 }
4579
4580                 /* For XGMI wait for all resets to complete before proceeding */
4581                 if (!r) {
4582                         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4583                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4584                                         flush_work(&tmp_adev->xgmi_reset_work);
4585                                         r = tmp_adev->asic_reset_res;
4586                                         if (r)
4587                                                 break;
4588                                 }
4589                         }
4590                 }
4591         }
4592
4593         if (!r && amdgpu_ras_intr_triggered()) {
4594                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4595                         if (tmp_adev->mmhub.ras_funcs &&
4596                             tmp_adev->mmhub.ras_funcs->reset_ras_error_count)
4597                                 tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev);
4598                 }
4599
4600                 amdgpu_ras_intr_cleared();
4601         }
4602
4603         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4604                 if (need_full_reset) {
4605                         /* post card */
4606                         r = amdgpu_device_asic_init(tmp_adev);
4607                         if (r) {
4608                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
4609                         } else {
4610                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4611                                 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4612                                 if (r)
4613                                         goto out;
4614
4615                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4616                                 if (r)
4617                                         goto out;
4618
4619                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4620                                 if (vram_lost) {
4621                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
4622                                         amdgpu_inc_vram_lost(tmp_adev);
4623                                 }
4624
4625                                 r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
4626                                 if (r)
4627                                         goto out;
4628
4629                                 r = amdgpu_device_fw_loading(tmp_adev);
4630                                 if (r)
4631                                         return r;
4632
4633                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4634                                 if (r)
4635                                         goto out;
4636
4637                                 if (vram_lost)
4638                                         amdgpu_device_fill_reset_magic(tmp_adev);
4639
4640                                 /*
4641                                  * Add this ASIC back as tracked since the
4642                                  * reset has already completed successfully.
4643                                  */
4644                                 amdgpu_register_gpu_instance(tmp_adev);
4645
4646                                 if (!reset_context->hive &&
4647                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4648                                         amdgpu_xgmi_add_device(tmp_adev);
4649
4650                                 r = amdgpu_device_ip_late_init(tmp_adev);
4651                                 if (r)
4652                                         goto out;
4653
4654                                 amdgpu_fbdev_set_suspend(tmp_adev, 0);
4655
4656                                 /*
4657                                  * The GPU enters a bad state once the number
4658                                  * of faulty pages caught by ECC reaches the
4659                                  * threshold, and RAS recovery is scheduled
4660                                  * next. So add a check here to break recovery
4661                                  * if it indeed exceeds the bad page threshold,
4662                                  * and remind the user to retire this GPU or
4663                                  * set a bigger bad_page_threshold value the
4664                                  * next time the driver is probed.
4665                                  */
4666                                 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4667                                         /* must succeed. */
4668                                         amdgpu_ras_resume(tmp_adev);
4669                                 } else {
4670                                         r = -EINVAL;
4671                                         goto out;
4672                                 }
4673
4674                                 /* Update PSP FW topology after reset */
4675                                 if (reset_context->hive &&
4676                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4677                                         r = amdgpu_xgmi_update_topology(
4678                                                 reset_context->hive, tmp_adev);
4679                         }
4680                 }
4681
4682 out:
4683                 if (!r) {
4684                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4685                         r = amdgpu_ib_ring_tests(tmp_adev);
4686                         if (r) {
4687                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4688                                 need_full_reset = true;
4689                                 r = -EAGAIN;
4690                                 goto end;
4691                         }
4692                 }
4693
4694                 if (!r)
4695                         r = amdgpu_device_recover_vram(tmp_adev);
4696                 else
4697                         tmp_adev->asic_reset_res = r;
4698         }
4699
4700 end:
4701         if (need_full_reset)
4702                 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4703         else
4704                 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4705         return r;
4706 }
4707
4708 static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
4709                                 struct amdgpu_hive_info *hive)
4710 {
4711         if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
4712                 return false;
4713
4714         if (hive) {
4715                 down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
4716         } else {
4717                 down_write(&adev->reset_sem);
4718         }
4719
4720         switch (amdgpu_asic_reset_method(adev)) {
4721         case AMD_RESET_METHOD_MODE1:
4722                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4723                 break;
4724         case AMD_RESET_METHOD_MODE2:
4725                 adev->mp1_state = PP_MP1_STATE_RESET;
4726                 break;
4727         default:
4728                 adev->mp1_state = PP_MP1_STATE_NONE;
4729                 break;
4730         }
4731
4732         return true;
4733 }
4734
4735 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4736 {
4737         amdgpu_vf_error_trans_all(adev);
4738         adev->mp1_state = PP_MP1_STATE_NONE;
4739         atomic_set(&adev->in_gpu_reset, 0);
4740         up_write(&adev->reset_sem);
4741 }
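
/*
 * Usage sketch (editorial): lock and unlock are strictly paired around a
 * recovery attempt, and the trylock semantics mean a false return must bail
 * out instead of blocking:
 *
 *	if (!amdgpu_device_lock_adev(adev, hive))
 *		return -EAGAIN;		// another reset is already in flight
 *	// ... perform the reset work ...
 *	amdgpu_device_unlock_adev(adev);
 */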
4742
4743 /*
4744  * Lock a list of amdgpu devices in a hive safely; if the device is not
4745  * part of a multi-node hive, this behaves like amdgpu_device_lock_adev.
4746  *
4747  * Unlock does not require a rollback.
4748  */
4749 static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
4750 {
4751         struct amdgpu_device *tmp_adev = NULL;
4752
4753         if (adev->gmc.xgmi.num_physical_nodes > 1) {
4754                 if (!hive) {
4755                         dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
4756                         return -ENODEV;
4757                 }
4758                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4759                         if (!amdgpu_device_lock_adev(tmp_adev, hive))
4760                                 goto roll_back;
4761                 }
4762         } else if (!amdgpu_device_lock_adev(adev, hive))
4763                 return -EAGAIN;
4764
4765         return 0;
4766 roll_back:
4767         if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
4768                 /*
4769                  * If the locking iteration broke off in the middle of a
4770                  * hive, there may be a race issue, or a hive device may
4771                  * have locked up independently.
4772                  * We may or may not be in trouble, so roll back the locks
4773                  * already taken and emit a warning.
4774                  */
4775                 dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
4776                 list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4777                         amdgpu_device_unlock_adev(tmp_adev);
4778                 }
4779         }
4780         return -EAGAIN;
4781 }
4782
4783 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4784 {
4785         struct pci_dev *p = NULL;
4786
4787         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4788                         adev->pdev->bus->number, 1);
4789         if (p) {
4790                 pm_runtime_enable(&(p->dev));
4791                 pm_runtime_resume(&(p->dev));
4792         }
4793 }
4794
4795 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4796 {
4797         enum amd_reset_method reset_method;
4798         struct pci_dev *p = NULL;
4799         u64 expires;
4800
4801         /*
4802          * For now, only BACO and mode1 reset are confirmed to
4803          * suffer from the audio issue if not properly suspended.
4804          */
4805         reset_method = amdgpu_asic_reset_method(adev);
4806         if ((reset_method != AMD_RESET_METHOD_BACO) &&
4807              (reset_method != AMD_RESET_METHOD_MODE1))
4808                 return -EINVAL;
4809
4810         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4811                         adev->pdev->bus->number, 1);
4812         if (!p)
4813                 return -ENODEV;
4814
4815         expires = pm_runtime_autosuspend_expiration(&(p->dev));
4816         if (!expires)
4817                 /*
4818                  * If we cannot get the audio device autosuspend delay,
4819                  * use a fixed 4s interval. Since the audio controller's
4820                  * default autosuspend delay setting is 3s, the 4s used
4821                  * here is guaranteed to cover it.
4822                  */
4823                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4824
4825         while (!pm_runtime_status_suspended(&(p->dev))) {
4826                 if (!pm_runtime_suspend(&(p->dev)))
4827                         break;
4828
4829                 if (expires < ktime_get_mono_fast_ns()) {
4830                         dev_warn(adev->dev, "failed to suspend display audio\n");
4831                         /* TODO: abort the succeeding gpu reset? */
4832                         return -ETIMEDOUT;
4833                 }
4834         }
4835
4836         pm_runtime_disable(&(p->dev));
4837
4838         return 0;
4839 }
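
/*
 * Usage sketch (editorial): suspend and resume of the audio function are
 * paired across the reset, exactly as amdgpu_device_gpu_recover() does
 * further below:
 *
 *	if (!amdgpu_device_suspend_display_audio(tmp_adev))
 *		audio_suspended = true;
 *	// ... GPU reset ...
 *	if (audio_suspended)
 *		amdgpu_device_resume_display_audio(tmp_adev);
 */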
4840
4841 static void amdgpu_device_recheck_guilty_jobs(
4842         struct amdgpu_device *adev, struct list_head *device_list_handle,
4843         struct amdgpu_reset_context *reset_context)
4844 {
4845         int i, r = 0;
4846
4847         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4848                 struct amdgpu_ring *ring = adev->rings[i];
4849                 int ret = 0;
4850                 struct drm_sched_job *s_job;
4851
4852                 if (!ring || !ring->sched.thread)
4853                         continue;
4854
4855                 s_job = list_first_entry_or_null(&ring->sched.pending_list,
4856                                 struct drm_sched_job, list);
4857                 if (s_job == NULL)
4858                         continue;
4859
4860                 /* clear the job's guilty status; the following step decides the real culprit */
4861                 drm_sched_reset_karma(s_job);
4862                 /* the real bad job will be resubmitted twice; take an extra dma_fence_get
4863                  * here to keep the fence refcount balanced */
4864                 dma_fence_get(s_job->s_fence->parent);
4865                 drm_sched_resubmit_jobs_ext(&ring->sched, 1);
4866
4867                 ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
4868                 if (ret == 0) { /* timeout */
4869                         DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
4870                                                 ring->sched.name, s_job->id);
4871
4872                         /* set guilty */
4873                         drm_sched_increase_karma(s_job);
4874 retry:
4875                         /* do hw reset */
4876                         if (amdgpu_sriov_vf(adev)) {
4877                                 amdgpu_virt_fini_data_exchange(adev);
4878                                 r = amdgpu_device_reset_sriov(adev, false);
4879                                 if (r)
4880                                         adev->asic_reset_res = r;
4881                         } else {
4882                                 clear_bit(AMDGPU_SKIP_HW_RESET,
4883                                           &reset_context->flags);
4884                                 r = amdgpu_do_asic_reset(device_list_handle,
4885                                                          reset_context);
4886                                 if (r && r == -EAGAIN)
4887                                         goto retry;
4888                         }
4889
4890                         /*
4891                          * bump the reset counter so that subsequently
4892                          * resubmitted jobs will flush their VMIDs
4893                          */
4894                         atomic_inc(&adev->gpu_reset_counter);
4895                         continue;
4896                 }
4897
4898                 /* got the hw fence, signal finished fence */
4899                 atomic_dec(ring->sched.score);
4900                 dma_fence_put(s_job->s_fence->parent);
4901                 dma_fence_get(&s_job->s_fence->finished);
4902                 dma_fence_signal(&s_job->s_fence->finished);
4903                 dma_fence_put(&s_job->s_fence->finished);
4904
4905                 /* remove node from list and free the job */
4906                 spin_lock(&ring->sched.job_list_lock);
4907                 list_del_init(&s_job->list);
4908                 spin_unlock(&ring->sched.job_list_lock);
4909                 ring->sched.ops->free_job(s_job);
4910         }
4911 }
4912
4913 /**
4914  * amdgpu_device_gpu_recover - reset the ASIC and recover the scheduler
4915  *
4916  * @adev: amdgpu_device pointer
4917  * @job: the job that triggered the hang
4918  *
4919  * Attempt to reset the GPU if it has hung (all ASICs).
4920  * Attempt a soft reset or full reset and reinitialize the ASIC.
4921  * Returns 0 for success or an error on failure.
4922  */
4923
4924 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4925                               struct amdgpu_job *job)
4926 {
4927         struct list_head device_list, *device_list_handle =  NULL;
4928         bool job_signaled = false;
4929         struct amdgpu_hive_info *hive = NULL;
4930         struct amdgpu_device *tmp_adev = NULL;
4931         int i, r = 0;
4932         bool need_emergency_restart = false;
4933         bool audio_suspended = false;
4934         int tmp_vram_lost_counter;
4935         struct amdgpu_reset_context reset_context;
4936
4937         memset(&reset_context, 0, sizeof(reset_context));
4938
4939         /*
4940          * Special case: RAS triggered and full reset isn't supported
4941          */
4942         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
4943
4944         /*
4945          * Flush RAM to disk so that after reboot
4946          * the user can read log and see why the system rebooted.
4947          */
4948         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
4949                 DRM_WARN("Emergency reboot.");
4950
4951                 ksys_sync_helper();
4952                 emergency_restart();
4953         }
4954
4955         dev_info(adev->dev, "GPU %s begin!\n",
4956                 need_emergency_restart ? "jobs stop":"reset");
4957
4958         /*
4959          * Here we trylock to avoid a chain of resets executing from
4960          * either jobs triggering on different adevs in an XGMI hive or jobs
4961          * on different schedulers of the same device while this TO handler runs.
4962          * We always reset all schedulers for a device and all devices in an
4963          * XGMI hive, so that should take care of them too.
4964          */
4965         hive = amdgpu_get_xgmi_hive(adev);
4966         if (hive) {
4967                 if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
4968                         DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
4969                                 job ? job->base.id : -1, hive->hive_id);
4970                         amdgpu_put_xgmi_hive(hive);
4971                         if (job && job->vm)
4972                                 drm_sched_increase_karma(&job->base);
4973                         return 0;
4974                 }
4975                 mutex_lock(&hive->hive_lock);
4976         }
4977
4978         reset_context.method = AMD_RESET_METHOD_NONE;
4979         reset_context.reset_req_dev = adev;
4980         reset_context.job = job;
4981         reset_context.hive = hive;
4982         clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
4983
4984         /*
4985          * Lock the device before we try to operate on the linked list;
4986          * if we didn't get the device lock, don't touch the linked list
4987          * since others may be iterating over it.
4988          */
4989         r = amdgpu_device_lock_hive_adev(adev, hive);
4990         if (r) {
4991                 dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
4992                                         job ? job->base.id : -1);
4993
4994                 /* even though we skipped this reset, we still need to mark the job guilty */
4995                 if (job && job->vm)
4996                         drm_sched_increase_karma(&job->base);
4997                 goto skip_recovery;
4998         }
4999
5000         /*
5001          * Build list of devices to reset.
5002          * In case we are in XGMI hive mode, resort the device list
5003          * to put adev in the 1st position.
5004          */
5005         INIT_LIST_HEAD(&device_list);
5006         if (adev->gmc.xgmi.num_physical_nodes > 1) {
5007                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
5008                         list_add_tail(&tmp_adev->reset_list, &device_list);
5009                 if (!list_is_first(&adev->reset_list, &device_list))
5010                         list_rotate_to_front(&adev->reset_list, &device_list);
5011                 device_list_handle = &device_list;
5012         } else {
5013                 list_add_tail(&adev->reset_list, &device_list);
5014                 device_list_handle = &device_list;
5015         }
5016
5017         /* block all schedulers and reset given job's ring */
5018         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5019                 /*
5020                  * Try to put the audio codec into suspend state
5021                  * before the GPU reset starts.
5022                  *
5023                  * The power domain of the graphics device is
5024                  * shared with the AZ power domain. Without this,
5025                  * we may change the audio hardware behind the
5026                  * audio driver's back, which would trigger
5027                  * audio codec errors.
5028                  */
5029                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5030                         audio_suspended = true;
5031
5032                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5033
5034                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5035
5036                 if (!amdgpu_sriov_vf(tmp_adev))
5037                         amdgpu_amdkfd_pre_reset(tmp_adev);
5038
5039                 /*
5040                  * Mark the ASICs to be reset as untracked first,
5041                  * and add them back after the reset completes.
5042                  */
5043                 amdgpu_unregister_gpu_instance(tmp_adev);
5044
5045                 amdgpu_fbdev_set_suspend(tmp_adev, 1);
5046
5047                 /* disable ras on ALL IPs */
5048                 if (!need_emergency_restart &&
5049                       amdgpu_device_ip_need_full_reset(tmp_adev))
5050                         amdgpu_ras_suspend(tmp_adev);
5051
5052                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5053                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5054
5055                         if (!ring || !ring->sched.thread)
5056                                 continue;
5057
5058                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5059
5060                         if (need_emergency_restart)
5061                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5062                 }
5063                 atomic_inc(&tmp_adev->gpu_reset_counter);
5064         }
5065
5066         if (need_emergency_restart)
5067                 goto skip_sched_resume;
5068
5069         /*
5070          * Must check whether the guilty job has already signaled here, since
5071          * after this point all old HW fences are force signaled.
5072          *
5073          * job->base holds a reference to parent fence
5074          */
5075         if (job && job->base.s_fence->parent &&
5076             dma_fence_is_signaled(job->base.s_fence->parent)) {
5077                 job_signaled = true;
5078                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5079                 goto skip_hw_reset;
5080         }
5081
5082 retry:  /* Rest of adevs pre asic reset from XGMI hive. */
5083         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5084                 r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
5085                 /* TODO: should we stop? */
5086                 if (r) {
5087                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5088                                   r, adev_to_drm(tmp_adev)->unique);
5089                         tmp_adev->asic_reset_res = r;
5090                 }
5091         }
5092
5093         tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
5094         /* Actual ASIC resets if needed.*/
5095         /* Host driver will handle XGMI hive reset for SRIOV */
5096         if (amdgpu_sriov_vf(adev)) {
5097                 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5098                 if (r)
5099                         adev->asic_reset_res = r;
5100         } else {
5101                 r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
5102                 if (r && r == -EAGAIN)
5103                         goto retry;
5104         }
5105
5106 skip_hw_reset:
5107
5108         /* Post ASIC reset for all devs .*/
5109         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5110
5111                 /*
5112                  * Sometimes a later bad compute job can block a good gfx job because
5113                  * the gfx and compute rings share internal GC hardware. We add an
5114                  * additional guilty-job recheck step to find the real culprit: it
5115                  * synchronously resubmits and waits for the first pending job to
5116                  * signal; if that times out, we identify it as the real guilty job.
5117                  */
5118                 if (amdgpu_gpu_recovery == 2 &&
5119                         !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5120                         amdgpu_device_recheck_guilty_jobs(
5121                                 tmp_adev, device_list_handle, &reset_context);
5122
5123                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5124                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5125
5126                         if (!ring || !ring->sched.thread)
5127                                 continue;
5128
5129                         /* No point in resubmitting jobs if we didn't HW reset */
5130                         if (!tmp_adev->asic_reset_res && !job_signaled)
5131                                 drm_sched_resubmit_jobs(&ring->sched);
5132
5133                         drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5134                 }
5135
5136                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5137                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5138                 }
5139
5140                 tmp_adev->asic_reset_res = 0;
5141
5142                 if (r) {
5143                         /* bad news: how do we tell userspace? */
5144                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5145                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5146                 } else {
5147                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5148                         if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5149                                 DRM_WARN("smart shift update failed\n");
5150                 }
5151         }
5152
5153 skip_sched_resume:
5154         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5155                 /* unlock kfd: SRIOV would do it separately */
5156                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5157                         amdgpu_amdkfd_post_reset(tmp_adev);
5158
5159                 /* kfd_post_reset will do nothing if the kfd device is not initialized;
5160                  * bring up kfd here if it was not initialized before
5161                  */
5162                 if (!tmp_adev->kfd.init_complete)
5163                         amdgpu_amdkfd_device_init(tmp_adev);
5164
5165                 if (audio_suspended)
5166                         amdgpu_device_resume_display_audio(tmp_adev);
5167                 amdgpu_device_unlock_adev(tmp_adev);
5168         }
5169
5170 skip_recovery:
5171         if (hive) {
5172                 atomic_set(&hive->in_reset, 0);
5173                 mutex_unlock(&hive->hive_lock);
5174                 amdgpu_put_xgmi_hive(hive);
5175         }
5176
5177         if (r && r != -EAGAIN)
5178                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5179         return r;
5180 }
5181
5182 /**
5183  * amdgpu_device_get_pcie_info - fetch PCIE info about the PCIE slot
5184  *
5185  * @adev: amdgpu_device pointer
5186  *
5187  * Fetches and stores in the driver the PCIE capabilities (gen speed
5188  * and lanes) of the slot the device is in. Handles APUs and
5189  * virtualized environments where PCIE config space may not be available.
5190  */
5191 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5192 {
5193         struct pci_dev *pdev;
5194         enum pci_bus_speed speed_cap, platform_speed_cap;
5195         enum pcie_link_width platform_link_width;
5196
5197         if (amdgpu_pcie_gen_cap)
5198                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5199
5200         if (amdgpu_pcie_lane_cap)
5201                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5202
5203         /* covers APUs as well */
5204         if (pci_is_root_bus(adev->pdev->bus)) {
5205                 if (adev->pm.pcie_gen_mask == 0)
5206                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5207                 if (adev->pm.pcie_mlw_mask == 0)
5208                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5209                 return;
5210         }
5211
5212         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5213                 return;
5214
5215         pcie_bandwidth_available(adev->pdev, NULL,
5216                                  &platform_speed_cap, &platform_link_width);
5217
5218         if (adev->pm.pcie_gen_mask == 0) {
5219                 /* asic caps */
5220                 pdev = adev->pdev;
5221                 speed_cap = pcie_get_speed_cap(pdev);
5222                 if (speed_cap == PCI_SPEED_UNKNOWN) {
5223                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5224                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5225                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5226                 } else {
5227                         if (speed_cap == PCIE_SPEED_32_0GT)
5228                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5229                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5230                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5231                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5232                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5233                         else if (speed_cap == PCIE_SPEED_16_0GT)
5234                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5235                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5236                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5237                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5238                         else if (speed_cap == PCIE_SPEED_8_0GT)
5239                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5240                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5241                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5242                         else if (speed_cap == PCIE_SPEED_5_0GT)
5243                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5244                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5245                         else
5246                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5247                 }
5248                 /* platform caps */
5249                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5250                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5251                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5252                 } else {
5253                         if (platform_speed_cap == PCIE_SPEED_32_0GT)
5254                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5255                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5256                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5257                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5258                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5259                         else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5260                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5261                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5262                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5263                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5264                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5265                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5266                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5267                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5268                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5269                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5270                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5271                         else
5272                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5273
5274                 }
5275         }
5276         if (adev->pm.pcie_mlw_mask == 0) {
5277                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5278                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5279                 } else {
5280                         switch (platform_link_width) {
5281                         case PCIE_LNK_X32:
5282                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5283                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5284                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5285                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5286                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5287                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5288                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5289                                 break;
5290                         case PCIE_LNK_X16:
5291                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5292                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5293                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5294                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5295                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5296                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5297                                 break;
5298                         case PCIE_LNK_X12:
5299                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5300                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5301                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5302                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5303                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5304                                 break;
5305                         case PCIE_LNK_X8:
5306                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5307                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5308                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5309                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5310                                 break;
5311                         case PCIE_LNK_X4:
5312                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5313                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5314                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5315                                 break;
5316                         case PCIE_LNK_X2:
5317                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5318                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5319                                 break;
5320                         case PCIE_LNK_X1:
5321                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5322                                 break;
5323                         default:
5324                                 break;
5325                         }
5326                 }
5327         }
5328 }
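
/*
 * Editorial example: the CAIL_* masks accumulate every supported speed, so
 * a device whose link caps out at PCIe Gen3 ends up advertising Gen1
 * through Gen3:
 *
 *	adev->pm.pcie_gen_mask = CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 *				 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
 *				 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3;
 *
 * Both masks can also be forced wholesale via the amdgpu_pcie_gen_cap and
 * amdgpu_pcie_lane_cap module parameters, as the checks at the top of this
 * function show.
 */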
5329
5330 int amdgpu_device_baco_enter(struct drm_device *dev)
5331 {
5332         struct amdgpu_device *adev = drm_to_adev(dev);
5333         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5334
5335         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5336                 return -ENOTSUPP;
5337
5338         if (ras && adev->ras_enabled &&
5339             adev->nbio.funcs->enable_doorbell_interrupt)
5340                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5341
5342         return amdgpu_dpm_baco_enter(adev);
5343 }
5344
5345 int amdgpu_device_baco_exit(struct drm_device *dev)
5346 {
5347         struct amdgpu_device *adev = drm_to_adev(dev);
5348         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5349         int ret = 0;
5350
5351         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5352                 return -ENOTSUPP;
5353
5354         ret = amdgpu_dpm_baco_exit(adev);
5355         if (ret)
5356                 return ret;
5357
5358         if (ras && adev->ras_enabled &&
5359             adev->nbio.funcs->enable_doorbell_interrupt)
5360                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5361
5362         if (amdgpu_passthrough(adev) &&
5363             adev->nbio.funcs->clear_doorbell_interrupt)
5364                 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5365
5366         return 0;
5367 }
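
/*
 * Usage sketch (editorial): BACO entry and exit bracket a low-power window,
 * typically driven from the runtime-PM suspend/resume callbacks:
 *
 *	r = amdgpu_device_baco_enter(drm_dev);	// enter the GPU-off state
 *	if (r)
 *		return r;
 *	// ... device sits in BACO until a wake event ...
 *	r = amdgpu_device_baco_exit(drm_dev);
 */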
5368
5369 static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
5370 {
5371         int i;
5372
5373         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5374                 struct amdgpu_ring *ring = adev->rings[i];
5375
5376                 if (!ring || !ring->sched.thread)
5377                         continue;
5378
5379                 cancel_delayed_work_sync(&ring->sched.work_tdr);
5380         }
5381 }
5382
5383 /**
5384  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5385  * @pdev: PCI device struct
5386  * @state: PCI channel state
5387  *
5388  * Description: Called when a PCI error is detected.
5389  *
5390  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5391  */
5392 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5393 {
5394         struct drm_device *dev = pci_get_drvdata(pdev);
5395         struct amdgpu_device *adev = drm_to_adev(dev);
5396         int i;
5397
5398         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5399
5400         if (adev->gmc.xgmi.num_physical_nodes > 1) {
5401                 DRM_WARN("No support for XGMI hive yet...");
5402                 return PCI_ERS_RESULT_DISCONNECT;
5403         }
5404
5405         adev->pci_channel_state = state;
5406
5407         switch (state) {
5408         case pci_channel_io_normal:
5409                 return PCI_ERS_RESULT_CAN_RECOVER;
5410         /* Fatal error, prepare for slot reset */
5411         case pci_channel_io_frozen:
5412                 /*
5413                  * Cancel and wait for all TDRs in progress if we fail to
5414                  * set adev->in_gpu_reset in amdgpu_device_lock_adev.
5415                  *
5416                  * Locking adev->reset_sem will prevent any external access
5417                  * to the GPU during PCI error recovery.
5418                  */
5419                 while (!amdgpu_device_lock_adev(adev, NULL))
5420                         amdgpu_cancel_all_tdr(adev);
5421
5422                 /*
5423                  * Block any work scheduling as we do for regular GPU reset
5424                  * for the duration of the recovery
5425                  */
5426                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5427                         struct amdgpu_ring *ring = adev->rings[i];
5428
5429                         if (!ring || !ring->sched.thread)
5430                                 continue;
5431
5432                         drm_sched_stop(&ring->sched, NULL);
5433                 }
5434                 atomic_inc(&adev->gpu_reset_counter);
5435                 return PCI_ERS_RESULT_NEED_RESET;
5436         case pci_channel_io_perm_failure:
5437                 /* Permanent error, prepare for device removal */
5438                 return PCI_ERS_RESULT_DISCONNECT;
5439         }
5440
5441         return PCI_ERS_RESULT_NEED_RESET;
5442 }
5443
5444 /**
5445  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5446  * @pdev: pointer to PCI device
5447  */
5448 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5449 {
5450
5451         DRM_INFO("PCI error: mmio enabled callback!!\n");
5452
5453         /* TODO - dump whatever for debugging purposes */
5454
5455         /* This is called only if amdgpu_pci_error_detected returns
5456          * PCI_ERS_RESULT_CAN_RECOVER. Reads/writes to the device still
5457          * work, so there is no need to reset the slot.
5458          */
5459
5460         return PCI_ERS_RESULT_RECOVERED;
5461 }
5462
5463 /**
5464  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5465  * @pdev: PCI device struct
5466  *
5467  * Description: This routine is called by the pci error recovery
5468  * code after the PCI slot has been reset, just before we
5469  * should resume normal operations.
5470  */
5471 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5472 {
5473         struct drm_device *dev = pci_get_drvdata(pdev);
5474         struct amdgpu_device *adev = drm_to_adev(dev);
5475         int r, i;
5476         struct amdgpu_reset_context reset_context;
5477         u32 memsize;
5478         struct list_head device_list;
5479
5480         DRM_INFO("PCI error: slot reset callback!!\n");
5481
5482         memset(&reset_context, 0, sizeof(reset_context));
5483
5484         INIT_LIST_HEAD(&device_list);
5485         list_add_tail(&adev->reset_list, &device_list);
5486
5487         /* wait for asic to come out of reset */
5488         msleep(500);
5489
5490         /* Restore PCI config space */
5491         amdgpu_device_load_pci_state(pdev);
5492
5493         /* confirm the ASIC came out of reset */
5494         for (i = 0; i < adev->usec_timeout; i++) {
5495                 memsize = amdgpu_asic_get_config_memsize(adev);
5496
5497                 if (memsize != 0xffffffff)
5498                         break;
5499                 udelay(1);
5500         }
5501         if (memsize == 0xffffffff) {
5502                 r = -ETIME;
5503                 goto out;
5504         }
5505
5506         reset_context.method = AMD_RESET_METHOD_NONE;
5507         reset_context.reset_req_dev = adev;
5508         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5509         set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5510
5511         adev->no_hw_access = true;
5512         r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5513         adev->no_hw_access = false;
5514         if (r)
5515                 goto out;
5516
5517         r = amdgpu_do_asic_reset(&device_list, &reset_context);
5518
5519 out:
5520         if (!r) {
5521                 if (amdgpu_device_cache_pci_state(adev->pdev))
5522                         pci_restore_state(adev->pdev);
5523
5524                 DRM_INFO("PCIe error recovery succeeded\n");
5525         } else {
5526                 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5527                 amdgpu_device_unlock_adev(adev);
5528         }
5529
5530         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5531 }
5532
5533 /**
5534  * amdgpu_pci_resume() - resume normal ops after PCI reset
5535  * @pdev: pointer to PCI device
5536  *
5537  * Called when the error recovery driver tells us that it's
5538  * OK to resume normal operation.
5539  */
5540 void amdgpu_pci_resume(struct pci_dev *pdev)
5541 {
5542         struct drm_device *dev = pci_get_drvdata(pdev);
5543         struct amdgpu_device *adev = drm_to_adev(dev);
5544         int i;
5545
5546
5547         DRM_INFO("PCI error: resume callback!!\n");
5548
5549         /* Only continue execution for the case of pci_channel_io_frozen */
5550         if (adev->pci_channel_state != pci_channel_io_frozen)
5551                 return;
5552
5553         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5554                 struct amdgpu_ring *ring = adev->rings[i];
5555
5556                 if (!ring || !ring->sched.thread)
5557                         continue;
5558
5559
5560                 drm_sched_resubmit_jobs(&ring->sched);
5561                 drm_sched_start(&ring->sched, true);
5562         }
5563
5564         amdgpu_device_unlock_adev(adev);
5565 }
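
/*
 * Editorial sketch: these four callbacks implement the kernel's PCI error
 * recovery (AER) contract and are wired up through a struct
 * pci_error_handlers at driver registration time, roughly:
 *
 *	static const struct pci_error_handlers amdgpu_pci_err_handler = {
 *		.error_detected	= amdgpu_pci_error_detected,
 *		.mmio_enabled	= amdgpu_pci_mmio_enabled,
 *		.slot_reset	= amdgpu_pci_slot_reset,
 *		.resume		= amdgpu_pci_resume,
 *	};
 */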
5566
5567 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5568 {
5569         struct drm_device *dev = pci_get_drvdata(pdev);
5570         struct amdgpu_device *adev = drm_to_adev(dev);
5571         int r;
5572
5573         r = pci_save_state(pdev);
5574         if (!r) {
5575                 kfree(adev->pci_state);
5576
5577                 adev->pci_state = pci_store_saved_state(pdev);
5578
5579                 if (!adev->pci_state) {
5580                         DRM_ERROR("Failed to store PCI saved state");
5581                         return false;
5582                 }
5583         } else {
5584                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5585                 return false;
5586         }
5587
5588         return true;
5589 }
5590
5591 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5592 {
5593         struct drm_device *dev = pci_get_drvdata(pdev);
5594         struct amdgpu_device *adev = drm_to_adev(dev);
5595         int r;
5596
5597         if (!adev->pci_state)
5598                 return false;
5599
5600         r = pci_load_saved_state(pdev, adev->pci_state);
5601
5602         if (!r) {
5603                 pci_restore_state(pdev);
5604         } else {
5605                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5606                 return false;
5607         }
5608
5609         return true;
5610 }
5611
5612 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5613                 struct amdgpu_ring *ring)
5614 {
5615 #ifdef CONFIG_X86_64
5616         if (adev->flags & AMD_IS_APU)
5617                 return;
5618 #endif
5619         if (adev->gmc.xgmi.connected_to_cpu)
5620                 return;
5621
5622         if (ring && ring->funcs->emit_hdp_flush)
5623                 amdgpu_ring_emit_hdp_flush(ring);
5624         else
5625                 amdgpu_asic_flush_hdp(adev, ring);
5626 }
5627
5628 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5629                 struct amdgpu_ring *ring)
5630 {
5631 #ifdef CONFIG_X86_64
5632         if (adev->flags & AMD_IS_APU)
5633                 return;
5634 #endif
5635         if (adev->gmc.xgmi.connected_to_cpu)
5636                 return;
5637
5638         amdgpu_asic_invalidate_hdp(adev, ring);
5639 }
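
/*
 * Usage sketch (editorial, semantics hedged): HDP flush/invalidate bracket
 * CPU access to VRAM through the BAR, so that CPU writes reach memory and
 * stale cached reads are dropped:
 *
 *	memcpy_toio(vram_cpu_addr, data, size);	// CPU write via the BAR
 *	amdgpu_device_flush_hdp(adev, NULL);	// make it visible to the GPU
 *
 *	amdgpu_device_invalidate_hdp(adev, NULL);
 *	memcpy_fromio(data, vram_cpu_addr, size);	// then a fresh CPU read
 */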