1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_probe_helper.h>
36 #include <drm/amdgpu_drm.h>
37 #include <linux/vgaarb.h>
38 #include <linux/vga_switcheroo.h>
39 #include <linux/efi.h>
40 #include "amdgpu.h"
41 #include "amdgpu_trace.h"
42 #include "amdgpu_i2c.h"
43 #include "atom.h"
44 #include "amdgpu_atombios.h"
45 #include "amdgpu_atomfirmware.h"
46 #include "amd_pcie.h"
47 #ifdef CONFIG_DRM_AMDGPU_SI
48 #include "si.h"
49 #endif
50 #ifdef CONFIG_DRM_AMDGPU_CIK
51 #include "cik.h"
52 #endif
53 #include "vi.h"
54 #include "soc15.h"
55 #include "nv.h"
56 #include "bif/bif_4_1_d.h"
57 #include <linux/pci.h>
58 #include <linux/firmware.h>
59 #include "amdgpu_vf_error.h"
60
61 #include "amdgpu_amdkfd.h"
62 #include "amdgpu_pm.h"
63
64 #include "amdgpu_xgmi.h"
65 #include "amdgpu_ras.h"
66 #include "amdgpu_pmu.h"
67 #include "amdgpu_fru_eeprom.h"
68 #include "amdgpu_reset.h"
69
70 #include <linux/suspend.h>
71 #include <drm/task_barrier.h>
72 #include <linux/pm_runtime.h>
73
74 #include <drm/drm_drv.h>
75
76 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
77 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
78 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
79 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
80 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
87 MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin");
88
89 #define AMDGPU_RESUME_MS                2000
90
91 const char *amdgpu_asic_name[] = {
92         "TAHITI",
93         "PITCAIRN",
94         "VERDE",
95         "OLAND",
96         "HAINAN",
97         "BONAIRE",
98         "KAVERI",
99         "KABINI",
100         "HAWAII",
101         "MULLINS",
102         "TOPAZ",
103         "TONGA",
104         "FIJI",
105         "CARRIZO",
106         "STONEY",
107         "POLARIS10",
108         "POLARIS11",
109         "POLARIS12",
110         "VEGAM",
111         "VEGA10",
112         "VEGA12",
113         "VEGA20",
114         "RAVEN",
115         "ARCTURUS",
116         "RENOIR",
117         "ALDEBARAN",
118         "NAVI10",
119         "CYAN_SKILLFISH",
120         "NAVI14",
121         "NAVI12",
122         "SIENNA_CICHLID",
123         "NAVY_FLOUNDER",
124         "VANGOGH",
125         "DIMGREY_CAVEFISH",
126         "BEIGE_GOBY",
127         "YELLOW_CARP",
128         "IP DISCOVERY",
129         "LAST",
130 };
131
132 /**
133  * DOC: pcie_replay_count
134  *
135  * The amdgpu driver provides a sysfs API for reporting the total number
136  * of PCIe replays (NAKs).
137  * The file pcie_replay_count is used for this and returns the total
138  * number of replays as a sum of the NAKs generated and NAKs received.
139  */
140
141 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
142                 struct device_attribute *attr, char *buf)
143 {
144         struct drm_device *ddev = dev_get_drvdata(dev);
145         struct amdgpu_device *adev = drm_to_adev(ddev);
146         uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
147
148         return sysfs_emit(buf, "%llu\n", cnt);
149 }
150
151 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
152                 amdgpu_device_get_pcie_replay_count, NULL);
153
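/*
 * Illustrative usage (not part of the driver): the attribute can be read
 * from userspace like any regular sysfs file. The card index in the path
 * below is an assumption; the actual path depends on the system.
 *
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *           unsigned long long cnt;
 *           FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");
 *
 *           if (!f)
 *                   return 1;
 *           if (fscanf(f, "%llu", &cnt) == 1)
 *                   printf("PCIe replays: %llu\n", cnt);
 *           fclose(f);
 *           return 0;
 *   }
 */
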
154 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
155
156 /**
157  * DOC: product_name
158  *
159  * The amdgpu driver provides a sysfs API for reporting the product name
160  * for the device.
161  * The file product_name is used for this and returns the product name
162  * as returned from the FRU.
163  * NOTE: This is only available for certain server cards
164  */
165
166 static ssize_t amdgpu_device_get_product_name(struct device *dev,
167                 struct device_attribute *attr, char *buf)
168 {
169         struct drm_device *ddev = dev_get_drvdata(dev);
170         struct amdgpu_device *adev = drm_to_adev(ddev);
171
172         return sysfs_emit(buf, "%s\n", adev->product_name);
173 }
174
175 static DEVICE_ATTR(product_name, S_IRUGO,
176                 amdgpu_device_get_product_name, NULL);
177
178 /**
179  * DOC: product_number
180  *
181  * The amdgpu driver provides a sysfs API for reporting the part number
182  * for the device.
183  * The file product_number is used for this and returns the part number
184  * as returned from the FRU.
185  * NOTE: This is only available for certain server cards
186  */
187
188 static ssize_t amdgpu_device_get_product_number(struct device *dev,
189                 struct device_attribute *attr, char *buf)
190 {
191         struct drm_device *ddev = dev_get_drvdata(dev);
192         struct amdgpu_device *adev = drm_to_adev(ddev);
193
194         return sysfs_emit(buf, "%s\n", adev->product_number);
195 }
196
197 static DEVICE_ATTR(product_number, S_IRUGO,
198                 amdgpu_device_get_product_number, NULL);
199
200 /**
201  * DOC: serial_number
202  *
203  * The amdgpu driver provides a sysfs API for reporting the serial number
204  * for the device.
205  * The file serial_number is used for this and returns the serial number
206  * as returned from the FRU.
207  * NOTE: This is only available for certain server cards
208  */
209
210 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
211                 struct device_attribute *attr, char *buf)
212 {
213         struct drm_device *ddev = dev_get_drvdata(dev);
214         struct amdgpu_device *adev = drm_to_adev(ddev);
215
216         return sysfs_emit(buf, "%s\n", adev->serial);
217 }
218
219 static DEVICE_ATTR(serial_number, S_IRUGO,
220                 amdgpu_device_get_serial_number, NULL);
221
222 /**
223  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
224  *
225  * @dev: drm_device pointer
226  *
227  * Returns true if the device is a dGPU with ATPX power control,
228  * otherwise returns false.
229  */
230 bool amdgpu_device_supports_px(struct drm_device *dev)
231 {
232         struct amdgpu_device *adev = drm_to_adev(dev);
233
234         if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
235                 return true;
236         return false;
237 }
238
239 /**
240  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
241  *
242  * @dev: drm_device pointer
243  *
244  * Returns true if the device is a dGPU with ACPI power control,
245  * otherwise returns false.
246  */
247 bool amdgpu_device_supports_boco(struct drm_device *dev)
248 {
249         struct amdgpu_device *adev = drm_to_adev(dev);
250
251         if (adev->has_pr3 ||
252             ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
253                 return true;
254         return false;
255 }
256
257 /**
258  * amdgpu_device_supports_baco - Does the device support BACO
259  *
260  * @dev: drm_device pointer
261  *
262  * Returns true if the device supports BACO,
263  * otherwise returns false.
264  */
265 bool amdgpu_device_supports_baco(struct drm_device *dev)
266 {
267         struct amdgpu_device *adev = drm_to_adev(dev);
268
269         return amdgpu_asic_supports_baco(adev);
270 }
271
272 /**
273  * amdgpu_device_supports_smart_shift - Is the device dGPU with
274  * smart shift support
275  *
276  * @dev: drm_device pointer
277  *
278  * Returns true if the device is a dGPU with Smart Shift support,
279  * otherwise returns false.
280  */
281 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
282 {
283         return (amdgpu_device_supports_boco(dev) &&
284                 amdgpu_acpi_is_power_shift_control_supported());
285 }
286
287 /*
288  * VRAM access helper functions
289  */
290
291 /**
292  * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
293  *
294  * @adev: amdgpu_device pointer
295  * @pos: offset of the buffer in vram
296  * @buf: virtual address of the buffer in system memory
297  * @size: read/write size; sizeof(@buf) must be >= @size
298  * @write: true - write to vram, otherwise - read from vram
299  */
300 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
301                              void *buf, size_t size, bool write)
302 {
303         unsigned long flags;
304         uint32_t hi = ~0, tmp = 0;
305         uint32_t *data = buf;
306         uint64_t last;
307         int idx;
308
309         if (!drm_dev_enter(&adev->ddev, &idx))
310                 return;
311
312         BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
313
314         spin_lock_irqsave(&adev->mmio_idx_lock, flags);
315         for (last = pos + size; pos < last; pos += 4) {
316                 tmp = pos >> 31;
317
318                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
319                 if (tmp != hi) {
320                         WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
321                         hi = tmp;
322                 }
323                 if (write)
324                         WREG32_NO_KIQ(mmMM_DATA, *data++);
325                 else
326                         *data++ = RREG32_NO_KIQ(mmMM_DATA);
327         }
328
329         spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
330         drm_dev_exit(idx);
331 }
332
333 /**
334  * amdgpu_device_aper_access - access vram via the vram aperture
335  *
336  * @adev: amdgpu_device pointer
337  * @pos: offset of the buffer in vram
338  * @buf: virtual address of the buffer in system memory
339  * @size: read/write size; sizeof(@buf) must be >= @size
340  * @write: true - write to vram, otherwise - read from vram
341  *
342  * Returns the number of bytes transferred.
343  */
344 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
345                                  void *buf, size_t size, bool write)
346 {
347 #ifdef CONFIG_64BIT
348         void __iomem *addr;
349         size_t count = 0;
350         uint64_t last;
351
352         if (!adev->mman.aper_base_kaddr)
353                 return 0;
354
355         last = min(pos + size, adev->gmc.visible_vram_size);
356         if (last > pos) {
357                 addr = adev->mman.aper_base_kaddr + pos;
358                 count = last - pos;
359
360                 if (write) {
361                         memcpy_toio(addr, buf, count);
362                         mb();
363                         amdgpu_device_flush_hdp(adev, NULL);
364                 } else {
365                         amdgpu_device_invalidate_hdp(adev, NULL);
366                         mb();
367                         memcpy_fromio(buf, addr, count);
368                 }
369
370         }
371
372         return count;
373 #else
374         return 0;
375 #endif
376 }
377
378 /**
379  * amdgpu_device_vram_access - read/write a buffer in vram
380  *
381  * @adev: amdgpu_device pointer
382  * @pos: offset of the buffer in vram
383  * @buf: virtual address of the buffer in system memory
384  * @size: read/write size; sizeof(@buf) must be >= @size
385  * @write: true - write to vram, otherwise - read from vram
386  */
387 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
388                                void *buf, size_t size, bool write)
389 {
390         size_t count;
391
392         /* try using the vram aperture to access vram first */
393         count = amdgpu_device_aper_access(adev, pos, buf, size, write);
394         size -= count;
395         if (size) {
396                 /* use MM access for the rest of vram */
397                 pos += count;
398                 buf += count;
399                 amdgpu_device_mm_access(adev, pos, buf, size, write);
400         }
401 }
402
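/*
 * Usage sketch (illustrative only): reading one dword from VRAM. The
 * offset is a made-up example; pos and size must be dword aligned.
 *
 *   uint32_t val;
 *
 *   amdgpu_device_vram_access(adev, 0x1000, &val, sizeof(val), false);
 *
 * The helper first tries the CPU-visible aperture and transparently falls
 * back to the MM_INDEX/MM_DATA path for any remainder outside of it.
 */
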
403 /*
404  * register access helper functions.
405  */
406
407 /* Check if hw access should be skipped because of hotplug or device error */
408 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
409 {
410         if (adev->no_hw_access)
411                 return true;
412
413 #ifdef CONFIG_LOCKDEP
414         /*
415          * This is a bit complicated to understand, so worth a comment. What we assert
416          * here is that the GPU reset is not running on another thread in parallel.
417          *
418          * For this we trylock the read side of the reset semaphore, if that succeeds
419  * we know that the reset is not running in parallel.
420          *
421          * If the trylock fails we assert that we are either already holding the read
422          * side of the lock or are the reset thread itself and hold the write side of
423          * the lock.
424          */
425         if (in_task()) {
426                 if (down_read_trylock(&adev->reset_sem))
427                         up_read(&adev->reset_sem);
428                 else
429                         lockdep_assert_held(&adev->reset_sem);
430         }
431 #endif
432         return false;
433 }
434
435 /**
436  * amdgpu_device_rreg - read a memory mapped IO or indirect register
437  *
438  * @adev: amdgpu_device pointer
439  * @reg: dword aligned register offset
440  * @acc_flags: access flags which require special behavior
441  *
442  * Returns the 32 bit value from the offset specified.
443  */
444 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
445                             uint32_t reg, uint32_t acc_flags)
446 {
447         uint32_t ret;
448
449         if (amdgpu_device_skip_hw_access(adev))
450                 return 0;
451
452         if ((reg * 4) < adev->rmmio_size) {
453                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
454                     amdgpu_sriov_runtime(adev) &&
455                     down_read_trylock(&adev->reset_sem)) {
456                         ret = amdgpu_kiq_rreg(adev, reg);
457                         up_read(&adev->reset_sem);
458                 } else {
459                         ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
460                 }
461         } else {
462                 ret = adev->pcie_rreg(adev, reg * 4);
463         }
464
465         trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
466
467         return ret;
468 }
469
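/*
 * Callers normally go through the convenience macros in amdgpu.h rather
 * than calling amdgpu_device_rreg() directly; a sketch of the common
 * pattern (register name hypothetical):
 *
 *   tmp = RREG32(mmSOME_REG);        // acc_flags = 0, may route via KIQ on SR-IOV
 *   tmp = RREG32_NO_KIQ(mmSOME_REG); // force direct MMIO, bypassing KIQ
 */
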
470 /*
471  * MMIO register byte read helper function
472  * @offset: byte offset from MMIO start
473  *
474 */
475
476 /**
477  * amdgpu_mm_rreg8 - read a memory mapped IO register
478  *
479  * @adev: amdgpu_device pointer
480  * @offset: byte aligned register offset
481  *
482  * Returns the 8 bit value from the offset specified.
483  */
484 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
485 {
486         if (amdgpu_device_skip_hw_access(adev))
487                 return 0;
488
489         if (offset < adev->rmmio_size)
490                 return (readb(adev->rmmio + offset));
491         BUG();
492 }
493
494 /*
495  * MMIO register byte write helper function
496  * @offset: byte offset from MMIO start
497  * @value: the value to be written to the register
498  *
499 */
500 /**
501  * amdgpu_mm_wreg8 - write a memory mapped IO register
502  *
503  * @adev: amdgpu_device pointer
504  * @offset: byte aligned register offset
505  * @value: 8 bit value to write
506  *
507  * Writes the value specified to the offset specified.
508  */
509 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
510 {
511         if (amdgpu_device_skip_hw_access(adev))
512                 return;
513
514         if (offset < adev->rmmio_size)
515                 writeb(value, adev->rmmio + offset);
516         else
517                 BUG();
518 }
519
520 /**
521  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
522  *
523  * @adev: amdgpu_device pointer
524  * @reg: dword aligned register offset
525  * @v: 32 bit value to write to the register
526  * @acc_flags: access flags which require special behavior
527  *
528  * Writes the value specified to the offset specified.
529  */
530 void amdgpu_device_wreg(struct amdgpu_device *adev,
531                         uint32_t reg, uint32_t v,
532                         uint32_t acc_flags)
533 {
534         if (amdgpu_device_skip_hw_access(adev))
535                 return;
536
537         if ((reg * 4) < adev->rmmio_size) {
538                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
539                     amdgpu_sriov_runtime(adev) &&
540                     down_read_trylock(&adev->reset_sem)) {
541                         amdgpu_kiq_wreg(adev, reg, v);
542                         up_read(&adev->reset_sem);
543                 } else {
544                         writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
545                 }
546         } else {
547                 adev->pcie_wreg(adev, reg * 4, v);
548         }
549
550         trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
551 }
552
553 /*
554  * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range
555  *
556  * This function is invoked only for debugfs register access.
557  */
558 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
559                              uint32_t reg, uint32_t v)
560 {
561         if (amdgpu_device_skip_hw_access(adev))
562                 return;
563
564         if (amdgpu_sriov_fullaccess(adev) &&
565             adev->gfx.rlc.funcs &&
566             adev->gfx.rlc.funcs->is_rlcg_access_range) {
567                 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
568                         return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
569         } else {
570                 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
571         }
572 }
573
574 /**
575  * amdgpu_mm_rdoorbell - read a doorbell dword
576  *
577  * @adev: amdgpu_device pointer
578  * @index: doorbell index
579  *
580  * Returns the value in the doorbell aperture at the
581  * requested doorbell index (CIK).
582  */
583 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
584 {
585         if (amdgpu_device_skip_hw_access(adev))
586                 return 0;
587
588         if (index < adev->doorbell.num_doorbells) {
589                 return readl(adev->doorbell.ptr + index);
590         } else {
591                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
592                 return 0;
593         }
594 }
595
596 /**
597  * amdgpu_mm_wdoorbell - write a doorbell dword
598  *
599  * @adev: amdgpu_device pointer
600  * @index: doorbell index
601  * @v: value to write
602  *
603  * Writes @v to the doorbell aperture at the
604  * requested doorbell index (CIK).
605  */
606 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
607 {
608         if (amdgpu_device_skip_hw_access(adev))
609                 return;
610
611         if (index < adev->doorbell.num_doorbells) {
612                 writel(v, adev->doorbell.ptr + index);
613         } else {
614                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
615         }
616 }
617
618 /**
619  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
620  *
621  * @adev: amdgpu_device pointer
622  * @index: doorbell index
623  *
624  * Returns the value in the doorbell aperture at the
625  * requested doorbell index (VEGA10+).
626  */
627 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
628 {
629         if (amdgpu_device_skip_hw_access(adev))
630                 return 0;
631
632         if (index < adev->doorbell.num_doorbells) {
633                 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
634         } else {
635                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
636                 return 0;
637         }
638 }
639
640 /**
641  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
642  *
643  * @adev: amdgpu_device pointer
644  * @index: doorbell index
645  * @v: value to write
646  *
647  * Writes @v to the doorbell aperture at the
648  * requested doorbell index (VEGA10+).
649  */
650 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
651 {
652         if (amdgpu_device_skip_hw_access(adev))
653                 return;
654
655         if (index < adev->doorbell.num_doorbells) {
656                 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
657         } else {
658                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
659         }
660 }
661
662 /**
663  * amdgpu_device_indirect_rreg - read an indirect register
664  *
665  * @adev: amdgpu_device pointer
666  * @pcie_index: mmio register offset
667  * @pcie_data: mmio register offset
668  * @reg_addr: indirect register address to read from
669  *
670  * Returns the value of indirect register @reg_addr
671  */
672 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
673                                 u32 pcie_index, u32 pcie_data,
674                                 u32 reg_addr)
675 {
676         unsigned long flags;
677         u32 r;
678         void __iomem *pcie_index_offset;
679         void __iomem *pcie_data_offset;
680
681         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
682         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
683         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
684
685         writel(reg_addr, pcie_index_offset);
686         readl(pcie_index_offset);
687         r = readl(pcie_data_offset);
688         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
689
690         return r;
691 }
692
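/*
 * ASIC code typically plugs this helper into adev->pcie_rreg with its own
 * NBIO index/data register pair, roughly as below (a soc15-style sketch,
 * not copied verbatim):
 *
 *   static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *   {
 *           u32 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 *           u32 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 *
 *           return amdgpu_device_indirect_rreg(adev, pcie_index, pcie_data, reg);
 *   }
 */
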
693 /**
694  * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
695  *
696  * @adev: amdgpu_device pointer
697  * @pcie_index: mmio register offset
698  * @pcie_data: mmio register offset
699  * @reg_addr: indirect register address to read from
700  *
701  * Returns the value of indirect register @reg_addr
702  */
703 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
704                                   u32 pcie_index, u32 pcie_data,
705                                   u32 reg_addr)
706 {
707         unsigned long flags;
708         u64 r;
709         void __iomem *pcie_index_offset;
710         void __iomem *pcie_data_offset;
711
712         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
713         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
714         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
715
716         /* read low 32 bits */
717         writel(reg_addr, pcie_index_offset);
718         readl(pcie_index_offset);
719         r = readl(pcie_data_offset);
720         /* read high 32 bits */
721         writel(reg_addr + 4, pcie_index_offset);
722         readl(pcie_index_offset);
723         r |= ((u64)readl(pcie_data_offset) << 32);
724         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
725
726         return r;
727 }
728
729 /**
730  * amdgpu_device_indirect_wreg - write an indirect register
731  *
732  * @adev: amdgpu_device pointer
733  * @pcie_index: mmio register offset
734  * @pcie_data: mmio register offset
735  * @reg_addr: indirect register offset
736  * @reg_data: indirect register data
737  *
738  */
739 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
740                                  u32 pcie_index, u32 pcie_data,
741                                  u32 reg_addr, u32 reg_data)
742 {
743         unsigned long flags;
744         void __iomem *pcie_index_offset;
745         void __iomem *pcie_data_offset;
746
747         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
748         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
749         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
750
751         writel(reg_addr, pcie_index_offset);
752         readl(pcie_index_offset);
753         writel(reg_data, pcie_data_offset);
754         readl(pcie_data_offset);
755         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
756 }
757
758 /**
759  * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register
760  *
761  * @adev: amdgpu_device pointer
762  * @pcie_index: mmio register offset
763  * @pcie_data: mmio register offset
764  * @reg_addr: indirect register offset
765  * @reg_data: indirect register data
766  *
767  */
768 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
769                                    u32 pcie_index, u32 pcie_data,
770                                    u32 reg_addr, u64 reg_data)
771 {
772         unsigned long flags;
773         void __iomem *pcie_index_offset;
774         void __iomem *pcie_data_offset;
775
776         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
777         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
778         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
779
780         /* write low 32 bits */
781         writel(reg_addr, pcie_index_offset);
782         readl(pcie_index_offset);
783         writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
784         readl(pcie_data_offset);
785         /* write high 32 bits */
786         writel(reg_addr + 4, pcie_index_offset);
787         readl(pcie_index_offset);
788         writel((u32)(reg_data >> 32), pcie_data_offset);
789         readl(pcie_data_offset);
790         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
791 }
792
793 /**
794  * amdgpu_invalid_rreg - dummy reg read function
795  *
796  * @adev: amdgpu_device pointer
797  * @reg: offset of register
798  *
799  * Dummy register read function.  Used for register blocks
800  * that certain asics don't have (all asics).
801  * Returns the value in the register.
802  */
803 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
804 {
805         DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
806         BUG();
807         return 0;
808 }
809
810 /**
811  * amdgpu_invalid_wreg - dummy reg write function
812  *
813  * @adev: amdgpu_device pointer
814  * @reg: offset of register
815  * @v: value to write to the register
816  *
817  * Dummy register write function.  Used for register blocks
818  * that certain asics don't have (all asics).
819  */
820 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
821 {
822         DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
823                   reg, v);
824         BUG();
825 }
826
827 /**
828  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
829  *
830  * @adev: amdgpu_device pointer
831  * @reg: offset of register
832  *
833  * Dummy register read function.  Used for register blocks
834  * that certain asics don't have (all asics).
835  * Returns the value in the register.
836  */
837 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
838 {
839         DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
840         BUG();
841         return 0;
842 }
843
844 /**
845  * amdgpu_invalid_wreg64 - dummy reg write function
846  *
847  * @adev: amdgpu_device pointer
848  * @reg: offset of register
849  * @v: value to write to the register
850  *
851  * Dummy register write function.  Used for register blocks
852  * that certain asics don't have (all asics).
853  */
854 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
855 {
856         DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
857                   reg, v);
858         BUG();
859 }
860
861 /**
862  * amdgpu_block_invalid_rreg - dummy reg read function
863  *
864  * @adev: amdgpu_device pointer
865  * @block: offset of instance
866  * @reg: offset of register
867  *
868  * Dummy register read function.  Used for register blocks
869  * that certain asics don't have (all asics).
870  * Returns the value in the register.
871  */
872 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
873                                           uint32_t block, uint32_t reg)
874 {
875         DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
876                   reg, block);
877         BUG();
878         return 0;
879 }
880
881 /**
882  * amdgpu_block_invalid_wreg - dummy reg write function
883  *
884  * @adev: amdgpu_device pointer
885  * @block: offset of instance
886  * @reg: offset of register
887  * @v: value to write to the register
888  *
889  * Dummy register write function.  Used for register blocks
890  * that certain asics don't have (all asics).
891  */
892 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
893                                       uint32_t block,
894                                       uint32_t reg, uint32_t v)
895 {
896         DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
897                   reg, block, v);
898         BUG();
899 }
900
901 /**
902  * amdgpu_device_asic_init - Wrapper for atom asic_init
903  *
904  * @adev: amdgpu_device pointer
905  *
906  * Does any asic specific work and then calls atom asic init.
907  */
908 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
909 {
910         amdgpu_asic_pre_asic_init(adev);
911
912         return amdgpu_atom_asic_init(adev->mode_info.atom_context);
913 }
914
915 /**
916  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
917  *
918  * @adev: amdgpu_device pointer
919  *
920  * Allocates a scratch page of VRAM for use by various things in the
921  * driver.
922  */
923 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
924 {
925         return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
926                                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
927                                        &adev->vram_scratch.robj,
928                                        &adev->vram_scratch.gpu_addr,
929                                        (void **)&adev->vram_scratch.ptr);
930 }
931
932 /**
933  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
934  *
935  * @adev: amdgpu_device pointer
936  *
937  * Frees the VRAM scratch page.
938  */
939 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
940 {
941         amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
942 }
943
944 /**
945  * amdgpu_device_program_register_sequence - program an array of registers.
946  *
947  * @adev: amdgpu_device pointer
948  * @registers: pointer to the register array
949  * @array_size: size of the register array
950  *
951  * Programs an array of registers with AND and OR masks.
952  * This is a helper for setting golden registers.
953  */
954 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
955                                              const u32 *registers,
956                                              const u32 array_size)
957 {
958         u32 tmp, reg, and_mask, or_mask;
959         int i;
960
961         if (array_size % 3)
962                 return;
963
964         for (i = 0; i < array_size; i += 3) {
965                 reg = registers[i + 0];
966                 and_mask = registers[i + 1];
967                 or_mask = registers[i + 2];
968
969                 if (and_mask == 0xffffffff) {
970                         tmp = or_mask;
971                 } else {
972                         tmp = RREG32(reg);
973                         tmp &= ~and_mask;
974                         if (adev->family >= AMDGPU_FAMILY_AI)
975                                 tmp |= (or_mask & and_mask);
976                         else
977                                 tmp |= or_mask;
978                 }
979                 WREG32(reg, tmp);
980         }
981 }
982
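/*
 * The register array is consumed as {reg, and_mask, or_mask} triplets; a
 * hypothetical golden-settings table would look like this (register names
 * and values made up for illustration):
 *
 *   static const u32 golden_settings_example[] = {
 *           mmSOME_REG,  0xffffffff, 0x00000001, // full mask: write or_mask as-is
 *           mmOTHER_REG, 0x0000ff00, 0x00003400, // read-modify-write of masked bits
 *   };
 *
 *   amdgpu_device_program_register_sequence(adev, golden_settings_example,
 *                                           ARRAY_SIZE(golden_settings_example));
 */
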
983 /**
984  * amdgpu_device_pci_config_reset - reset the GPU
985  *
986  * @adev: amdgpu_device pointer
987  *
988  * Resets the GPU using the pci config reset sequence.
989  * Only applicable to asics prior to vega10.
990  */
991 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
992 {
993         pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
994 }
995
996 /**
997  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
998  *
999  * @adev: amdgpu_device pointer
1000  *
1001  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1002  */
1003 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1004 {
1005         return pci_reset_function(adev->pdev);
1006 }
1007
1008 /*
1009  * GPU doorbell aperture helper functions.
1010  */
1011 /**
1012  * amdgpu_device_doorbell_init - Init doorbell driver information.
1013  *
1014  * @adev: amdgpu_device pointer
1015  *
1016  * Init doorbell driver information (CIK)
1017  * Returns 0 on success, error on failure.
1018  */
1019 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1020 {
1021
1022         /* No doorbell on SI hardware generation */
1023         if (adev->asic_type < CHIP_BONAIRE) {
1024                 adev->doorbell.base = 0;
1025                 adev->doorbell.size = 0;
1026                 adev->doorbell.num_doorbells = 0;
1027                 adev->doorbell.ptr = NULL;
1028                 return 0;
1029         }
1030
1031         if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1032                 return -EINVAL;
1033
1034         amdgpu_asic_init_doorbell_index(adev);
1035
1036         /* doorbell bar mapping */
1037         adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1038         adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1039
1040         adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
1041                                              adev->doorbell_index.max_assignment+1);
1042         if (adev->doorbell.num_doorbells == 0)
1043                 return -EINVAL;
1044
1045         /* For Vega, reserve and map two pages on doorbell BAR since SDMA
1046          * paging queue doorbells use the second page. The
1047          * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1048          * doorbells are in the first page. So with paging queue enabled,
1049          * the max num_doorbells should be extended by one page (0x400 in dwords)
1050          */
1051         if (adev->asic_type >= CHIP_VEGA10)
1052                 adev->doorbell.num_doorbells += 0x400;
1053
1054         adev->doorbell.ptr = ioremap(adev->doorbell.base,
1055                                      adev->doorbell.num_doorbells *
1056                                      sizeof(u32));
1057         if (adev->doorbell.ptr == NULL)
1058                 return -ENOMEM;
1059
1060         return 0;
1061 }
1062
1063 /**
1064  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1065  *
1066  * @adev: amdgpu_device pointer
1067  *
1068  * Tear down doorbell driver information (CIK)
1069  */
1070 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1071 {
1072         iounmap(adev->doorbell.ptr);
1073         adev->doorbell.ptr = NULL;
1074 }
1075
1076
1077
1078 /*
1079  * amdgpu_device_wb_*()
1080  * Writeback is the method by which the GPU updates special pages in memory
1081  * with the status of certain GPU events (fences, ring pointers, etc.).
1082  */
1083
1084 /**
1085  * amdgpu_device_wb_fini - Disable Writeback and free memory
1086  *
1087  * @adev: amdgpu_device pointer
1088  *
1089  * Disables Writeback and frees the Writeback memory (all asics).
1090  * Used at driver shutdown.
1091  */
1092 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1093 {
1094         if (adev->wb.wb_obj) {
1095                 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1096                                       &adev->wb.gpu_addr,
1097                                       (void **)&adev->wb.wb);
1098                 adev->wb.wb_obj = NULL;
1099         }
1100 }
1101
1102 /**
1103  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1104  *
1105  * @adev: amdgpu_device pointer
1106  *
1107  * Initializes writeback and allocates writeback memory (all asics).
1108  * Used at driver startup.
1109  * Returns 0 on success or a negative error code on failure.
1110  */
1111 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1112 {
1113         int r;
1114
1115         if (adev->wb.wb_obj == NULL) {
1116                 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1117                 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1118                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1119                                             &adev->wb.wb_obj, &adev->wb.gpu_addr,
1120                                             (void **)&adev->wb.wb);
1121                 if (r) {
1122                         dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1123                         return r;
1124                 }
1125
1126                 adev->wb.num_wb = AMDGPU_MAX_WB;
1127                 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1128
1129                 /* clear wb memory */
1130                 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1131         }
1132
1133         return 0;
1134 }
1135
1136 /**
1137  * amdgpu_device_wb_get - Allocate a wb entry
1138  *
1139  * @adev: amdgpu_device pointer
1140  * @wb: wb index
1141  *
1142  * Allocate a wb slot for use by the driver (all asics).
1143  * Returns 0 on success or -EINVAL on failure.
1144  */
1145 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1146 {
1147         unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1148
1149         if (offset < adev->wb.num_wb) {
1150                 __set_bit(offset, adev->wb.used);
1151                 *wb = offset << 3; /* convert to dw offset */
1152                 return 0;
1153         } else {
1154                 return -EINVAL;
1155         }
1156 }
1157
1158 /**
1159  * amdgpu_device_wb_free - Free a wb entry
1160  *
1161  * @adev: amdgpu_device pointer
1162  * @wb: wb index
1163  *
1164  * Free a wb slot allocated for use by the driver (all asics)
1165  */
1166 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1167 {
1168         wb >>= 3;
1169         if (wb < adev->wb.num_wb)
1170                 __clear_bit(wb, adev->wb.used);
1171 }
1172
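/*
 * Typical lifecycle of a wb slot (sketch; error handling elided). The
 * returned index is already a dword offset, so it addresses both the CPU
 * mapping and the GPU address of the same slot:
 *
 *   u32 wb, val;
 *   u64 gpu_addr;
 *
 *   if (amdgpu_device_wb_get(adev, &wb))
 *           return -EINVAL;
 *   gpu_addr = adev->wb.gpu_addr + wb * 4; // address the GPU writes to
 *   val = adev->wb.wb[wb];                 // CPU reads the same dword
 *   amdgpu_device_wb_free(adev, wb);
 */
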
1173 /**
1174  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1175  *
1176  * @adev: amdgpu_device pointer
1177  *
1178  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1179  * to fail, but if any of the BARs is not accessible after the resize we abort
1180  * driver loading by returning -ENODEV.
1181  */
1182 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1183 {
1184         int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1185         struct pci_bus *root;
1186         struct resource *res;
1187         unsigned i;
1188         u16 cmd;
1189         int r;
1190
1191         /* Bypass for VF */
1192         if (amdgpu_sriov_vf(adev))
1193                 return 0;
1194
1195         /* skip if the bios has already enabled large BAR */
1196         if (adev->gmc.real_vram_size &&
1197             (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1198                 return 0;
1199
1200         /* Check if the root BUS has 64bit memory resources */
1201         root = adev->pdev->bus;
1202         while (root->parent)
1203                 root = root->parent;
1204
1205         pci_bus_for_each_resource(root, res, i) {
1206                 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1207                     res->start > 0x100000000ull)
1208                         break;
1209         }
1210
1211         /* Trying to resize is pointless without a root hub window above 4GB */
1212         if (!res)
1213                 return 0;
1214
1215         /* Limit the BAR size to what is available */
1216         rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1217                         rbar_size);
1218
1219         /* Disable memory decoding while we change the BAR addresses and size */
1220         pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1221         pci_write_config_word(adev->pdev, PCI_COMMAND,
1222                               cmd & ~PCI_COMMAND_MEMORY);
1223
1224         /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1225         amdgpu_device_doorbell_fini(adev);
1226         if (adev->asic_type >= CHIP_BONAIRE)
1227                 pci_release_resource(adev->pdev, 2);
1228
1229         pci_release_resource(adev->pdev, 0);
1230
1231         r = pci_resize_resource(adev->pdev, 0, rbar_size);
1232         if (r == -ENOSPC)
1233                 DRM_INFO("Not enough PCI address space for a large BAR.");
1234         else if (r && r != -ENOTSUPP)
1235                 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1236
1237         pci_assign_unassigned_bus_resources(adev->pdev->bus);
1238
1239         /* When the doorbell or fb BAR isn't available we have no chance of
1240          * using the device.
1241          */
1242         r = amdgpu_device_doorbell_init(adev);
1243         if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1244                 return -ENODEV;
1245
1246         pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1247
1248         return 0;
1249 }
1250
1251 /*
1252  * GPU helpers function.
1253  */
1254 /**
1255  * amdgpu_device_need_post - check if the hw needs to be posted or not
1256  *
1257  * @adev: amdgpu_device pointer
1258  *
1259  * Check if the asic has been initialized (all asics) at driver startup
1260  * or if post is needed after a hw reset is performed.
1261  * Returns true if post is needed, false if not.
1262  */
1263 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1264 {
1265         uint32_t reg;
1266
1267         if (amdgpu_sriov_vf(adev))
1268                 return false;
1269
1270         if (amdgpu_passthrough(adev)) {
1271                 /* for FIJI: In the whole-GPU pass-through virtualization case, after VM reboot
1272                  * some old smc fw still needs the driver to do a vPost, otherwise the gpu hangs.
1273                  * smc fw versions above 22.15 don't have this flaw, so we force
1274                  * vPost to be executed for smc versions below 22.15.
1275                  */
1276                 if (adev->asic_type == CHIP_FIJI) {
1277                         int err;
1278                         uint32_t fw_ver;
1279                         err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1280                         /* force vPost if an error occurred */
1281                         if (err)
1282                                 return true;
1283
1284                         fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1285                         if (fw_ver < 0x00160e00)
1286                                 return true;
1287                 }
1288         }
1289
1290         /* Don't post if we need to reset whole hive on init */
1291         if (adev->gmc.xgmi.pending_reset)
1292                 return false;
1293
1294         if (adev->has_hw_reset) {
1295                 adev->has_hw_reset = false;
1296                 return true;
1297         }
1298
1299         /* bios scratch used on CIK+ */
1300         if (adev->asic_type >= CHIP_BONAIRE)
1301                 return amdgpu_atombios_scratch_need_asic_init(adev);
1302
1303         /* check MEM_SIZE for older asics */
1304         reg = amdgpu_asic_get_config_memsize(adev);
1305
1306         if ((reg != 0) && (reg != 0xffffffff))
1307                 return false;
1308
1309         return true;
1310 }
1311
1312 /* if we get transitioned to only one device, take VGA back */
1313 /**
1314  * amdgpu_device_vga_set_decode - enable/disable vga decode
1315  *
1316  * @pdev: PCI device pointer
1317  * @state: enable/disable vga decode
1318  *
1319  * Enable/disable vga decode (all asics).
1320  * Returns VGA resource flags.
1321  */
1322 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1323                 bool state)
1324 {
1325         struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1326         amdgpu_asic_set_vga_state(adev, state);
1327         if (state)
1328                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1329                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1330         else
1331                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1332 }
1333
1334 /**
1335  * amdgpu_device_check_block_size - validate the vm block size
1336  *
1337  * @adev: amdgpu_device pointer
1338  *
1339  * Validates the vm block size specified via module parameter.
1340  * The vm block size defines the number of bits in page table versus page directory,
1341  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1342  * page table and the remaining bits are in the page directory.
1343  */
1344 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1345 {
1346         /* defines number of bits in page table versus page directory,
1347          * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1348          * page table and the remaining bits are in the page directory */
1349         if (amdgpu_vm_block_size == -1)
1350                 return;
1351
1352         if (amdgpu_vm_block_size < 9) {
1353                 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1354                          amdgpu_vm_block_size);
1355                 amdgpu_vm_block_size = -1;
1356         }
1357 }
1358
1359 /**
1360  * amdgpu_device_check_vm_size - validate the vm size
1361  *
1362  * @adev: amdgpu_device pointer
1363  *
1364  * Validates the vm size in GB specified via module parameter.
1365  * The VM size is the size of the GPU virtual memory space in GB.
1366  */
1367 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1368 {
1369         /* no need to check the default value */
1370         if (amdgpu_vm_size == -1)
1371                 return;
1372
1373         if (amdgpu_vm_size < 1) {
1374                 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1375                          amdgpu_vm_size);
1376                 amdgpu_vm_size = -1;
1377         }
1378 }
1379
1380 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1381 {
1382         struct sysinfo si;
1383         bool is_os_64 = (sizeof(void *) == 8);
1384         uint64_t total_memory;
1385         uint64_t dram_size_seven_GB = 0x1B8000000;
1386         uint64_t dram_size_three_GB = 0xB8000000;
1387
1388         if (amdgpu_smu_memory_pool_size == 0)
1389                 return;
1390
1391         if (!is_os_64) {
1392                 DRM_WARN("Not 64-bit OS, feature not supported\n");
1393                 goto def_value;
1394         }
1395         si_meminfo(&si);
1396         total_memory = (uint64_t)si.totalram * si.mem_unit;
1397
1398         if ((amdgpu_smu_memory_pool_size == 1) ||
1399                 (amdgpu_smu_memory_pool_size == 2)) {
1400                 if (total_memory < dram_size_three_GB)
1401                         goto def_value1;
1402         } else if ((amdgpu_smu_memory_pool_size == 4) ||
1403                 (amdgpu_smu_memory_pool_size == 8)) {
1404                 if (total_memory < dram_size_seven_GB)
1405                         goto def_value1;
1406         } else {
1407                 DRM_WARN("Smu memory pool size not supported\n");
1408                 goto def_value;
1409         }
1410         adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1411
1412         return;
1413
1414 def_value1:
1415         DRM_WARN("No enough system memory\n");
1416 def_value:
1417         adev->pm.smu_prv_buffer_size = 0;
1418 }
1419
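/*
 * Note: amdgpu_smu_memory_pool_size is expressed in 256 MiB units, so the
 * reserved buffer is the value << 28 bytes; e.g. a value of 2 yields
 * 2 << 28 = 512 MiB.
 */
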
1420 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1421 {
1422         if (!(adev->flags & AMD_IS_APU) ||
1423             adev->asic_type < CHIP_RAVEN)
1424                 return 0;
1425
1426         switch (adev->asic_type) {
1427         case CHIP_RAVEN:
1428                 if (adev->pdev->device == 0x15dd)
1429                         adev->apu_flags |= AMD_APU_IS_RAVEN;
1430                 if (adev->pdev->device == 0x15d8)
1431                         adev->apu_flags |= AMD_APU_IS_PICASSO;
1432                 break;
1433         case CHIP_RENOIR:
1434                 if ((adev->pdev->device == 0x1636) ||
1435                     (adev->pdev->device == 0x164c))
1436                         adev->apu_flags |= AMD_APU_IS_RENOIR;
1437                 else
1438                         adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1439                 break;
1440         case CHIP_VANGOGH:
1441                 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1442                 break;
1443         case CHIP_YELLOW_CARP:
1444                 break;
1445         case CHIP_CYAN_SKILLFISH:
1446                 if (adev->pdev->device == 0x13FE)
1447                         adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1448                 break;
1449         default:
1450                 return -EINVAL;
1451         }
1452
1453         return 0;
1454 }
1455
1456 /**
1457  * amdgpu_device_check_arguments - validate module params
1458  *
1459  * @adev: amdgpu_device pointer
1460  *
1461  * Validates certain module parameters and updates
1462  * the associated values used by the driver (all asics).
1463  */
1464 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1465 {
1466         if (amdgpu_sched_jobs < 4) {
1467                 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1468                          amdgpu_sched_jobs);
1469                 amdgpu_sched_jobs = 4;
1470         } else if (!is_power_of_2(amdgpu_sched_jobs)){
1471                 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1472                          amdgpu_sched_jobs);
1473                 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1474         }
1475
1476         if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1477                 /* gart size must be greater or equal to 32M */
1478                 dev_warn(adev->dev, "gart size (%d) too small\n",
1479                          amdgpu_gart_size);
1480                 amdgpu_gart_size = -1;
1481         }
1482
1483         if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1484                 /* gtt size must be greater or equal to 32M */
1485                 dev_warn(adev->dev, "gtt size (%d) too small\n",
1486                                  amdgpu_gtt_size);
1487                 amdgpu_gtt_size = -1;
1488         }
1489
1490         /* valid range is between 4 and 9 inclusive */
1491         if (amdgpu_vm_fragment_size != -1 &&
1492             (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1493                 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1494                 amdgpu_vm_fragment_size = -1;
1495         }
1496
1497         if (amdgpu_sched_hw_submission < 2) {
1498                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1499                          amdgpu_sched_hw_submission);
1500                 amdgpu_sched_hw_submission = 2;
1501         } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1502                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1503                          amdgpu_sched_hw_submission);
1504                 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1505         }
1506
1507         amdgpu_device_check_smu_prv_buffer_size(adev);
1508
1509         amdgpu_device_check_vm_size(adev);
1510
1511         amdgpu_device_check_block_size(adev);
1512
1513         adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1514
1515         amdgpu_gmc_tmz_set(adev);
1516
1517         amdgpu_gmc_noretry_set(adev);
1518
1519         return 0;
1520 }
1521
1522 /**
1523  * amdgpu_switcheroo_set_state - set switcheroo state
1524  *
1525  * @pdev: pci dev pointer
1526  * @state: vga_switcheroo state
1527  *
1528  * Callback for the switcheroo driver.  Suspends or resumes
1529  * the asic before or after it is powered up using ACPI methods.
1530  */
1531 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1532                                         enum vga_switcheroo_state state)
1533 {
1534         struct drm_device *dev = pci_get_drvdata(pdev);
1535         int r;
1536
1537         if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1538                 return;
1539
1540         if (state == VGA_SWITCHEROO_ON) {
1541                 pr_info("switched on\n");
1542                 /* don't suspend or resume card normally */
1543                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1544
1545                 pci_set_power_state(pdev, PCI_D0);
1546                 amdgpu_device_load_pci_state(pdev);
1547                 r = pci_enable_device(pdev);
1548                 if (r)
1549                         DRM_WARN("pci_enable_device failed (%d)\n", r);
1550                 amdgpu_device_resume(dev, true);
1551
1552                 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1553         } else {
1554                 pr_info("switched off\n");
1555                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1556                 amdgpu_device_suspend(dev, true);
1557                 amdgpu_device_cache_pci_state(pdev);
1558                 /* Shut down the device */
1559                 pci_disable_device(pdev);
1560                 pci_set_power_state(pdev, PCI_D3cold);
1561                 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1562         }
1563 }
1564
1565 /**
1566  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1567  *
1568  * @pdev: pci dev pointer
1569  *
1570  * Callback for the switcheroo driver.  Checks if the switcheroo
1571  * state can be changed.
1572  * Returns true if the state can be changed, false if not.
1573  */
1574 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1575 {
1576         struct drm_device *dev = pci_get_drvdata(pdev);
1577
1578         /*
1579         * FIXME: open_count is protected by drm_global_mutex but that would lead to
1580         * locking inversion with the driver load path. And the access here is
1581         * completely racy anyway. So don't bother with locking for now.
1582         */
1583         return atomic_read(&dev->open_count) == 0;
1584 }
1585
1586 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1587         .set_gpu_state = amdgpu_switcheroo_set_state,
1588         .reprobe = NULL,
1589         .can_switch = amdgpu_switcheroo_can_switch,
1590 };
1591
1592 /**
1593  * amdgpu_device_ip_set_clockgating_state - set the CG state
1594  *
1595  * @dev: amdgpu_device pointer
1596  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1597  * @state: clockgating state (gate or ungate)
1598  *
1599  * Sets the requested clockgating state for all instances of
1600  * the hardware IP specified.
1601  * Returns the error code from the last instance.
1602  */
1603 int amdgpu_device_ip_set_clockgating_state(void *dev,
1604                                            enum amd_ip_block_type block_type,
1605                                            enum amd_clockgating_state state)
1606 {
1607         struct amdgpu_device *adev = dev;
1608         int i, r = 0;
1609
1610         for (i = 0; i < adev->num_ip_blocks; i++) {
1611                 if (!adev->ip_blocks[i].status.valid)
1612                         continue;
1613                 if (adev->ip_blocks[i].version->type != block_type)
1614                         continue;
1615                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1616                         continue;
1617                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1618                         (void *)adev, state);
1619                 if (r)
1620                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1621                                   adev->ip_blocks[i].version->funcs->name, r);
1622         }
1623         return r;
1624 }
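/*
 * Usage sketch (illustrative only): gating the clocks of every GFX IP
 * instance could look like:
 *
 *      r = amdgpu_device_ip_set_clockgating_state(adev,
 *                                                 AMD_IP_BLOCK_TYPE_GFX,
 *                                                 AMD_CG_STATE_GATE);
 *      if (r)
 *              DRM_ERROR("failed to gate GFX clocks (%d)\n", r);
 */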
1625
1626 /**
1627  * amdgpu_device_ip_set_powergating_state - set the PG state
1628  *
1629  * @dev: amdgpu_device pointer
1630  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1631  * @state: powergating state (gate or ungate)
1632  *
1633  * Sets the requested powergating state for all instances of
1634  * the hardware IP specified.
1635  * Returns the error code from the last instance.
1636  */
1637 int amdgpu_device_ip_set_powergating_state(void *dev,
1638                                            enum amd_ip_block_type block_type,
1639                                            enum amd_powergating_state state)
1640 {
1641         struct amdgpu_device *adev = dev;
1642         int i, r = 0;
1643
1644         for (i = 0; i < adev->num_ip_blocks; i++) {
1645                 if (!adev->ip_blocks[i].status.valid)
1646                         continue;
1647                 if (adev->ip_blocks[i].version->type != block_type)
1648                         continue;
1649                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1650                         continue;
1651                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1652                         (void *)adev, state);
1653                 if (r)
1654                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1655                                   adev->ip_blocks[i].version->funcs->name, r);
1656         }
1657         return r;
1658 }
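/*
 * Usage sketch (illustrative only): powergating mirrors the clockgating
 * example above, e.g.:
 *
 *      r = amdgpu_device_ip_set_powergating_state(adev,
 *                                                 AMD_IP_BLOCK_TYPE_VCN,
 *                                                 AMD_PG_STATE_GATE);
 */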
1659
1660 /**
1661  * amdgpu_device_ip_get_clockgating_state - get the CG state
1662  *
1663  * @adev: amdgpu_device pointer
1664  * @flags: clockgating feature flags
1665  *
1666  * Walks the list of IPs on the device and updates the clockgating
1667  * flags for each IP.
1668  * Updates @flags with the feature flags for each hardware IP where
1669  * clockgating is enabled.
1670  */
1671 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1672                                             u32 *flags)
1673 {
1674         int i;
1675
1676         for (i = 0; i < adev->num_ip_blocks; i++) {
1677                 if (!adev->ip_blocks[i].status.valid)
1678                         continue;
1679                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1680                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1681         }
1682 }
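/*
 * Usage sketch (illustrative only): callers pass a zeroed flag word and read
 * back the accumulated AMD_CG_SUPPORT_* bits:
 *
 *      u32 cg_flags = 0;
 *
 *      amdgpu_device_ip_get_clockgating_state(adev, &cg_flags);
 */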
1683
1684 /**
1685  * amdgpu_device_ip_wait_for_idle - wait for idle
1686  *
1687  * @adev: amdgpu_device pointer
1688  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1689  *
1690  * Waits for the requested hardware IP to be idle.
1691  * Returns 0 for success or a negative error code on failure.
1692  */
1693 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1694                                    enum amd_ip_block_type block_type)
1695 {
1696         int i, r;
1697
1698         for (i = 0; i < adev->num_ip_blocks; i++) {
1699                 if (!adev->ip_blocks[i].status.valid)
1700                         continue;
1701                 if (adev->ip_blocks[i].version->type == block_type) {
1702                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1703                         if (r)
1704                                 return r;
1705                         break;
1706                 }
1707         }
1708         return 0;
1710 }
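/*
 * Usage sketch (illustrative only): waiting for the GFX block to drain
 * before touching its state might read:
 *
 *      r = amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GFX);
 *      if (r)
 *              return r;
 */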
1711
1712 /**
1713  * amdgpu_device_ip_is_idle - is the hardware IP idle
1714  *
1715  * @adev: amdgpu_device pointer
1716  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1717  *
1718  * Check if the hardware IP is idle or not.
1719  * Returns true if the IP is idle, false if not.
1720  */
1721 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1722                               enum amd_ip_block_type block_type)
1723 {
1724         int i;
1725
1726         for (i = 0; i < adev->num_ip_blocks; i++) {
1727                 if (!adev->ip_blocks[i].status.valid)
1728                         continue;
1729                 if (adev->ip_blocks[i].version->type == block_type)
1730                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1731         }
1732         return true;
1734 }
1735
1736 /**
1737  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1738  *
1739  * @adev: amdgpu_device pointer
1740  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1741  *
1742  * Returns a pointer to the hardware IP block structure
1743  * if it exists for the asic, otherwise NULL.
1744  */
1745 struct amdgpu_ip_block *
1746 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1747                               enum amd_ip_block_type type)
1748 {
1749         int i;
1750
1751         for (i = 0; i < adev->num_ip_blocks; i++)
1752                 if (adev->ip_blocks[i].version->type == type)
1753                         return &adev->ip_blocks[i];
1754
1755         return NULL;
1756 }
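/*
 * Usage sketch (illustrative only): looking up a block to inspect its
 * version:
 *
 *      struct amdgpu_ip_block *ip =
 *              amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
 *
 *      if (ip)
 *              DRM_INFO("GFX IP v%u.%u\n", ip->version->major,
 *                       ip->version->minor);
 */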
1757
1758 /**
1759  * amdgpu_device_ip_block_version_cmp
1760  *
1761  * @adev: amdgpu_device pointer
1762  * @type: enum amd_ip_block_type
1763  * @major: major version
1764  * @minor: minor version
1765  *
1766  * Returns 0 if the IP block's version is equal to or greater than the
1767  * requested one, 1 if it is smaller or the ip_block doesn't exist.
1768  */
1769 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1770                                        enum amd_ip_block_type type,
1771                                        u32 major, u32 minor)
1772 {
1773         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1774
1775         if (ip_block && ((ip_block->version->major > major) ||
1776                         ((ip_block->version->major == major) &&
1777                         (ip_block->version->minor >= minor))))
1778                 return 0;
1779
1780         return 1;
1781 }
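/*
 * Usage sketch (illustrative only): since 0 means "at least the requested
 * version", a GFX v9.0-or-newer check reads:
 *
 *      if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *                                              9, 0))
 *              handle_gfx9_or_newer();
 *
 * handle_gfx9_or_newer() is a made-up placeholder.
 */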
1782
1783 /**
1784  * amdgpu_device_ip_block_add
1785  *
1786  * @adev: amdgpu_device pointer
1787  * @ip_block_version: pointer to the IP to add
1788  *
1789  * Adds the IP block driver information to the collection of IPs
1790  * on the asic.
1791  */
1792 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1793                                const struct amdgpu_ip_block_version *ip_block_version)
1794 {
1795         if (!ip_block_version)
1796                 return -EINVAL;
1797
1798         switch (ip_block_version->type) {
1799         case AMD_IP_BLOCK_TYPE_VCN:
1800                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1801                         return 0;
1802                 break;
1803         case AMD_IP_BLOCK_TYPE_JPEG:
1804                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1805                         return 0;
1806                 break;
1807         default:
1808                 break;
1809         }
1810
1811         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1812                   ip_block_version->funcs->name);
1813
1814         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1815
1816         return 0;
1817 }
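/*
 * Usage sketch (illustrative only): SoC code registers its blocks in
 * bring-up order from its set_ip_blocks path, e.g.:
 *
 *      r = amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 *      if (r)
 *              return r;
 */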
1818
1819 /**
1820  * amdgpu_device_enable_virtual_display - enable virtual display feature
1821  *
1822  * @adev: amdgpu_device pointer
1823  *
1824  * Enables the virtual display feature if the user has enabled it via
1825  * the module parameter virtual_display.  This feature provides virtual
1826  * display hardware on headless boards or in virtualized environments.
1827  * This function parses and validates the configuration string specified by
1828  * the user and configures the virtual display settings (number of
1829  * virtual connectors, crtcs, etc.) specified; see the format sketch below.
1830  */
1831 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1832 {
1833         adev->enable_virtual_display = false;
1834
1835         if (amdgpu_virtual_display) {
1836                 const char *pci_address_name = pci_name(adev->pdev);
1837                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1838
1839                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1840                 pciaddstr_tmp = pciaddstr;
1841                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1842                         pciaddname = strsep(&pciaddname_tmp, ",");
1843                         if (!strcmp("all", pciaddname)
1844                             || !strcmp(pci_address_name, pciaddname)) {
1845                                 long num_crtc;
1846                                 int res = -1;
1847
1848                                 adev->enable_virtual_display = true;
1849
1850                                 if (pciaddname_tmp)
1851                                         res = kstrtol(pciaddname_tmp, 10,
1852                                                       &num_crtc);
1853
1854                                 if (!res) {
1855                                         if (num_crtc < 1)
1856                                                 num_crtc = 1;
1857                                         if (num_crtc > 6)
1858                                                 num_crtc = 6;
1859                                         adev->mode_info.num_crtc = num_crtc;
1860                                 } else {
1861                                         adev->mode_info.num_crtc = 1;
1862                                 }
1863                                 break;
1864                         }
1865                 }
1866
1867                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1868                          amdgpu_virtual_display, pci_address_name,
1869                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1870
1871                 kfree(pciaddstr);
1872         }
1873 }
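/*
 * Parameter format sketch (derived from the parsing above): the string is a
 * semicolon-separated list of "<pci address>[,<num_crtc>]" entries, where
 * "all" matches every device and num_crtc is clamped to the range 1-6,
 * defaulting to 1, e.g.:
 *
 *      amdgpu.virtual_display=0000:01:00.0,2
 *      amdgpu.virtual_display=all,1
 */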
1874
1875 /**
1876  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1877  *
1878  * @adev: amdgpu_device pointer
1879  *
1880  * Parses the asic configuration parameters specified in the gpu info
1881  * firmware and makes them available to the driver for use in configuring
1882  * the asic.
1883  * Returns 0 on success, -EINVAL on failure.
1884  */
1885 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1886 {
1887         const char *chip_name;
1888         char fw_name[40];
1889         int err;
1890         const struct gpu_info_firmware_header_v1_0 *hdr;
1891
1892         adev->firmware.gpu_info_fw = NULL;
1893
1894         if (adev->mman.discovery_bin) {
1895                 amdgpu_discovery_get_gfx_info(adev);
1896
1897                 /*
1898                  * FIXME: The bounding box is still needed by Navi12, so
1899                  * temporarily read it from gpu_info firmware. Should be dropped
1900                  * when DAL no longer needs it.
1901                  */
1902                 if (adev->asic_type != CHIP_NAVI12)
1903                         return 0;
1904         }
1905
1906         switch (adev->asic_type) {
1907 #ifdef CONFIG_DRM_AMDGPU_SI
1908         case CHIP_VERDE:
1909         case CHIP_TAHITI:
1910         case CHIP_PITCAIRN:
1911         case CHIP_OLAND:
1912         case CHIP_HAINAN:
1913 #endif
1914 #ifdef CONFIG_DRM_AMDGPU_CIK
1915         case CHIP_BONAIRE:
1916         case CHIP_HAWAII:
1917         case CHIP_KAVERI:
1918         case CHIP_KABINI:
1919         case CHIP_MULLINS:
1920 #endif
1921         case CHIP_TOPAZ:
1922         case CHIP_TONGA:
1923         case CHIP_FIJI:
1924         case CHIP_POLARIS10:
1925         case CHIP_POLARIS11:
1926         case CHIP_POLARIS12:
1927         case CHIP_VEGAM:
1928         case CHIP_CARRIZO:
1929         case CHIP_STONEY:
1930         case CHIP_VEGA20:
1931         case CHIP_ALDEBARAN:
1932         case CHIP_SIENNA_CICHLID:
1933         case CHIP_NAVY_FLOUNDER:
1934         case CHIP_DIMGREY_CAVEFISH:
1935         case CHIP_BEIGE_GOBY:
1936         default:
1937                 return 0;
1938         case CHIP_VEGA10:
1939                 chip_name = "vega10";
1940                 break;
1941         case CHIP_VEGA12:
1942                 chip_name = "vega12";
1943                 break;
1944         case CHIP_RAVEN:
1945                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1946                         chip_name = "raven2";
1947                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1948                         chip_name = "picasso";
1949                 else
1950                         chip_name = "raven";
1951                 break;
1952         case CHIP_ARCTURUS:
1953                 chip_name = "arcturus";
1954                 break;
1955         case CHIP_RENOIR:
1956                 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1957                         chip_name = "renoir";
1958                 else
1959                         chip_name = "green_sardine";
1960                 break;
1961         case CHIP_NAVI10:
1962                 chip_name = "navi10";
1963                 break;
1964         case CHIP_NAVI14:
1965                 chip_name = "navi14";
1966                 break;
1967         case CHIP_NAVI12:
1968                 chip_name = "navi12";
1969                 break;
1970         case CHIP_VANGOGH:
1971                 chip_name = "vangogh";
1972                 break;
1973         case CHIP_YELLOW_CARP:
1974                 chip_name = "yellow_carp";
1975                 break;
1976         }
1977
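        /* e.g. "amdgpu/vega10_gpu_info.bin" when chip_name is "vega10" */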
1978         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1979         err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1980         if (err) {
1981                 dev_err(adev->dev,
1982                         "Failed to load gpu_info firmware \"%s\"\n",
1983                         fw_name);
1984                 goto out;
1985         }
1986         err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1987         if (err) {
1988                 dev_err(adev->dev,
1989                         "Failed to validate gpu_info firmware \"%s\"\n",
1990                         fw_name);
1991                 goto out;
1992         }
1993
1994         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1995         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1996
1997         switch (hdr->version_major) {
1998         case 1:
1999         {
2000                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2001                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2002                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2003
2004                 /*
2005                  * Should be dropped when DAL no longer needs it.
2006                  */
2007                 if (adev->asic_type == CHIP_NAVI12)
2008                         goto parse_soc_bounding_box;
2009
2010                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2011                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2012                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2013                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2014                 adev->gfx.config.max_texture_channel_caches =
2015                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
2016                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2017                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2018                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2019                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2020                 adev->gfx.config.double_offchip_lds_buf =
2021                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2022                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2023                 adev->gfx.cu_info.max_waves_per_simd =
2024                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2025                 adev->gfx.cu_info.max_scratch_slots_per_cu =
2026                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2027                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2028                 if (hdr->version_minor >= 1) {
2029                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2030                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2031                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2032                         adev->gfx.config.num_sc_per_sh =
2033                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2034                         adev->gfx.config.num_packer_per_sc =
2035                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2036                 }
2037
2038 parse_soc_bounding_box:
2039                 /*
2040                  * soc bounding box info is not integrated in the discovery table,
2041                  * so when it is needed we always parse it from the gpu_info firmware.
2042                  */
2043                 if (hdr->version_minor == 2) {
2044                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2045                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2046                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2047                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2048                 }
2049                 break;
2050         }
2051         default:
2052                 dev_err(adev->dev,
2053                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2054                 err = -EINVAL;
2055                 goto out;
2056         }
2057 out:
2058         return err;
2059 }
2060
2061 /**
2062  * amdgpu_device_ip_early_init - run early init for hardware IPs
2063  *
2064  * @adev: amdgpu_device pointer
2065  *
2066  * Early initialization pass for hardware IPs.  The hardware IPs that make
2067  * up each asic are discovered and each IP's early_init callback is run.  This
2068  * is the first stage in initializing the asic.
2069  * Returns 0 on success, negative error code on failure.
2070  */
2071 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2072 {
2073         int i, r;
2074
2075         amdgpu_device_enable_virtual_display(adev);
2076
2077         if (amdgpu_sriov_vf(adev)) {
2078                 r = amdgpu_virt_request_full_gpu(adev, true);
2079                 if (r)
2080                         return r;
2081         }
2082
2083         switch (adev->asic_type) {
2084 #ifdef CONFIG_DRM_AMDGPU_SI
2085         case CHIP_VERDE:
2086         case CHIP_TAHITI:
2087         case CHIP_PITCAIRN:
2088         case CHIP_OLAND:
2089         case CHIP_HAINAN:
2090                 adev->family = AMDGPU_FAMILY_SI;
2091                 r = si_set_ip_blocks(adev);
2092                 if (r)
2093                         return r;
2094                 break;
2095 #endif
2096 #ifdef CONFIG_DRM_AMDGPU_CIK
2097         case CHIP_BONAIRE:
2098         case CHIP_HAWAII:
2099         case CHIP_KAVERI:
2100         case CHIP_KABINI:
2101         case CHIP_MULLINS:
2102                 if (adev->flags & AMD_IS_APU)
2103                         adev->family = AMDGPU_FAMILY_KV;
2104                 else
2105                         adev->family = AMDGPU_FAMILY_CI;
2106
2107                 r = cik_set_ip_blocks(adev);
2108                 if (r)
2109                         return r;
2110                 break;
2111 #endif
2112         case CHIP_TOPAZ:
2113         case CHIP_TONGA:
2114         case CHIP_FIJI:
2115         case CHIP_POLARIS10:
2116         case CHIP_POLARIS11:
2117         case CHIP_POLARIS12:
2118         case CHIP_VEGAM:
2119         case CHIP_CARRIZO:
2120         case CHIP_STONEY:
2121                 if (adev->flags & AMD_IS_APU)
2122                         adev->family = AMDGPU_FAMILY_CZ;
2123                 else
2124                         adev->family = AMDGPU_FAMILY_VI;
2125
2126                 r = vi_set_ip_blocks(adev);
2127                 if (r)
2128                         return r;
2129                 break;
2130         default:
2131                 r = amdgpu_discovery_set_ip_blocks(adev);
2132                 if (r)
2133                         return r;
2134                 break;
2135         }
2136
2137         amdgpu_amdkfd_device_probe(adev);
2138
2139         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2140         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2141                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2142         if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2143                 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2144
2145         for (i = 0; i < adev->num_ip_blocks; i++) {
2146                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2147                         DRM_ERROR("disabled ip block: %d <%s>\n",
2148                                   i, adev->ip_blocks[i].version->funcs->name);
2149                         adev->ip_blocks[i].status.valid = false;
2150                 } else {
2151                         if (adev->ip_blocks[i].version->funcs->early_init) {
2152                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2153                                 if (r == -ENOENT) {
2154                                         adev->ip_blocks[i].status.valid = false;
2155                                 } else if (r) {
2156                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2157                                                   adev->ip_blocks[i].version->funcs->name, r);
2158                                         return r;
2159                                 } else {
2160                                         adev->ip_blocks[i].status.valid = true;
2161                                 }
2162                         } else {
2163                                 adev->ip_blocks[i].status.valid = true;
2164                         }
2165                 }
2166                 /* get the vbios after the asic_funcs are set up */
2167                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2168                         r = amdgpu_device_parse_gpu_info_fw(adev);
2169                         if (r)
2170                                 return r;
2171
2172                         /* Read BIOS */
2173                         if (!amdgpu_get_bios(adev))
2174                                 return -EINVAL;
2175
2176                         r = amdgpu_atombios_init(adev);
2177                         if (r) {
2178                                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2179                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2180                                 return r;
2181                         }
2182
2183                         /* get pf2vf msg info at its earliest time */
2184                         if (amdgpu_sriov_vf(adev))
2185                                 amdgpu_virt_init_data_exchange(adev);
2186
2187                 }
2188         }
2189
2190         adev->cg_flags &= amdgpu_cg_mask;
2191         adev->pg_flags &= amdgpu_pg_mask;
2192
2193         return 0;
2194 }
2195
2196 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2197 {
2198         int i, r;
2199
2200         for (i = 0; i < adev->num_ip_blocks; i++) {
2201                 if (!adev->ip_blocks[i].status.sw)
2202                         continue;
2203                 if (adev->ip_blocks[i].status.hw)
2204                         continue;
2205                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2206                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2207                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2208                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2209                         if (r) {
2210                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2211                                           adev->ip_blocks[i].version->funcs->name, r);
2212                                 return r;
2213                         }
2214                         adev->ip_blocks[i].status.hw = true;
2215                 }
2216         }
2217
2218         return 0;
2219 }
2220
2221 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2222 {
2223         int i, r;
2224
2225         for (i = 0; i < adev->num_ip_blocks; i++) {
2226                 if (!adev->ip_blocks[i].status.sw)
2227                         continue;
2228                 if (adev->ip_blocks[i].status.hw)
2229                         continue;
2230                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2231                 if (r) {
2232                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2233                                   adev->ip_blocks[i].version->funcs->name, r);
2234                         return r;
2235                 }
2236                 adev->ip_blocks[i].status.hw = true;
2237         }
2238
2239         return 0;
2240 }
2241
2242 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2243 {
2244         int r = 0;
2245         int i;
2246         uint32_t smu_version;
2247
2248         if (adev->asic_type >= CHIP_VEGA10) {
2249                 for (i = 0; i < adev->num_ip_blocks; i++) {
2250                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2251                                 continue;
2252
2253                         if (!adev->ip_blocks[i].status.sw)
2254                                 continue;
2255
2256                         /* no need to do the fw loading again if already done */
2257                         if (adev->ip_blocks[i].status.hw)
2258                                 break;
2259
2260                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2261                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2262                                 if (r) {
2263                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2264                                                           adev->ip_blocks[i].version->funcs->name, r);
2265                                         return r;
2266                                 }
2267                         } else {
2268                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2269                                 if (r) {
2270                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2271                                                           adev->ip_blocks[i].version->funcs->name, r);
2272                                         return r;
2273                                 }
2274                         }
2275
2276                         adev->ip_blocks[i].status.hw = true;
2277                         break;
2278                 }
2279         }
2280
2281         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2282                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2283
2284         return r;
2285 }
2286
2287 /**
2288  * amdgpu_device_ip_init - run init for hardware IPs
2289  *
2290  * @adev: amdgpu_device pointer
2291  *
2292  * Main initialization pass for hardware IPs.  The list of all the hardware
2293  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2294  * are run.  sw_init initializes the software state associated with each IP
2295  * and hw_init initializes the hardware associated with each IP.
2296  * Returns 0 on success, negative error code on failure.
2297  */
2298 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2299 {
2300         int i, r;
2301
2302         r = amdgpu_ras_init(adev);
2303         if (r)
2304                 return r;
2305
2306         for (i = 0; i < adev->num_ip_blocks; i++) {
2307                 if (!adev->ip_blocks[i].status.valid)
2308                         continue;
2309                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2310                 if (r) {
2311                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2312                                   adev->ip_blocks[i].version->funcs->name, r);
2313                         goto init_failed;
2314                 }
2315                 adev->ip_blocks[i].status.sw = true;
2316
2317                 /* need to do gmc hw init early so we can allocate gpu mem */
2318                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2319                         r = amdgpu_device_vram_scratch_init(adev);
2320                         if (r) {
2321                                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2322                                 goto init_failed;
2323                         }
2324                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2325                         if (r) {
2326                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2327                                 goto init_failed;
2328                         }
2329                         r = amdgpu_device_wb_init(adev);
2330                         if (r) {
2331                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2332                                 goto init_failed;
2333                         }
2334                         adev->ip_blocks[i].status.hw = true;
2335
2336                         /* right after GMC hw init, we create CSA */
2337                         if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2338                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2339                                                                 AMDGPU_GEM_DOMAIN_VRAM,
2340                                                                 AMDGPU_CSA_SIZE);
2341                                 if (r) {
2342                                         DRM_ERROR("allocate CSA failed %d\n", r);
2343                                         goto init_failed;
2344                                 }
2345                         }
2346                 }
2347         }
2348
2349         if (amdgpu_sriov_vf(adev))
2350                 amdgpu_virt_init_data_exchange(adev);
2351
2352         r = amdgpu_ib_pool_init(adev);
2353         if (r) {
2354                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2355                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2356                 goto init_failed;
2357         }
2358
2359         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
2360         if (r)
2361                 goto init_failed;
2362
2363         r = amdgpu_device_ip_hw_init_phase1(adev);
2364         if (r)
2365                 goto init_failed;
2366
2367         r = amdgpu_device_fw_loading(adev);
2368         if (r)
2369                 goto init_failed;
2370
2371         r = amdgpu_device_ip_hw_init_phase2(adev);
2372         if (r)
2373                 goto init_failed;
2374
2375         /*
2376          * Retired pages will be loaded from eeprom and reserved here;
2377          * this should be called after amdgpu_device_ip_hw_init_phase2 since
2378          * for some ASICs the RAS EEPROM code relies on the SMU being fully
2379          * functional for I2C communication, which is only true at this point.
2380          *
2381          * amdgpu_ras_recovery_init may fail, but the upper layers only care
2382          * about failures caused by a bad gpu state and stop the amdgpu init
2383          * process accordingly. For other failure cases, it still releases all
2384          * the resources and prints an error message rather than returning a
2385          * negative value to the upper level.
2386          *
2387          * Note: theoretically, this should be called before all vram allocations
2388          * to protect retired pages from being reused.
2389          */
2390         r = amdgpu_ras_recovery_init(adev);
2391         if (r)
2392                 goto init_failed;
2393
2394         if (adev->gmc.xgmi.num_physical_nodes > 1)
2395                 amdgpu_xgmi_add_device(adev);
2396
2397         /* Don't init kfd if the whole hive needs to be reset during init */
2398         if (!adev->gmc.xgmi.pending_reset)
2399                 amdgpu_amdkfd_device_init(adev);
2400
2401         r = amdgpu_amdkfd_resume_iommu(adev);
2402         if (r)
2403                 goto init_failed;
2404
2405         amdgpu_fru_get_product_info(adev);
2406
2407 init_failed:
2408         if (amdgpu_sriov_vf(adev))
2409                 amdgpu_virt_release_full_gpu(adev, true);
2410
2411         return r;
2412 }
2413
2414 /**
2415  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2416  *
2417  * @adev: amdgpu_device pointer
2418  *
2419  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2420  * this function before a GPU reset.  If the value is retained after a
2421  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2422  */
2423 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2424 {
2425         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2426 }
2427
2428 /**
2429  * amdgpu_device_check_vram_lost - check if vram is valid
2430  *
2431  * @adev: amdgpu_device pointer
2432  *
2433  * Checks the reset magic value written to the gart pointer in VRAM.
2434  * The driver calls this after a GPU reset to see if the contents of
2435  * VRAM are lost or not.
2436  * returns true if vram is lost, false if not.
2437  */
2438 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2439 {
2440         if (memcmp(adev->gart.ptr, adev->reset_magic,
2441                         AMDGPU_RESET_MAGIC_NUM))
2442                 return true;
2443
2444         if (!amdgpu_in_reset(adev))
2445                 return false;
2446
2447         /*
2448          * For all ASICs with baco/mode1 reset, the VRAM is
2449          * always assumed to be lost.
2450          */
2451         switch (amdgpu_asic_reset_method(adev)) {
2452         case AMD_RESET_METHOD_BACO:
2453         case AMD_RESET_METHOD_MODE1:
2454                 return true;
2455         default:
2456                 return false;
2457         }
2458 }
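/*
 * Usage sketch (illustrative only): reset code would typically sample this
 * right after the ASIC comes back and bump the loss counter, e.g.:
 *
 *      if (amdgpu_device_check_vram_lost(adev))
 *              amdgpu_inc_vram_lost(adev);
 */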
2459
2460 /**
2461  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2462  *
2463  * @adev: amdgpu_device pointer
2464  * @state: clockgating state (gate or ungate)
2465  *
2466  * The list of all the hardware IPs that make up the asic is walked and the
2467  * set_clockgating_state callbacks are run.
2468  * During the late init pass this enables clockgating for the hardware
2469  * IPs; during fini or suspend it disables clockgating for them.
2470  * Returns 0 on success, negative error code on failure.
2471  */
2473 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2474                                enum amd_clockgating_state state)
2475 {
2476         int i, j, r;
2477
2478         if (amdgpu_emu_mode == 1)
2479                 return 0;
2480
2481         for (j = 0; j < adev->num_ip_blocks; j++) {
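                /* gate in IP discovery order, ungate in reverse order */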
2482                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2483                 if (!adev->ip_blocks[i].status.late_initialized)
2484                         continue;
2485                 /* skip CG for GFX on S0ix */
2486                 if (adev->in_s0ix &&
2487                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2488                         continue;
2489                 /* skip CG for VCE/UVD, it's handled specially */
2490                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2491                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2492                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2493                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2494                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2495                         /* enable clockgating to save power */
2496                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2497                                                                                      state);
2498                         if (r) {
2499                                 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
2500                                           adev->ip_blocks[i].version->funcs->name, r);
2501                                 return r;
2502                         }
2503                 }
2504         }
2505
2506         return 0;
2507 }
2508
2509 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2510                                enum amd_powergating_state state)
2511 {
2512         int i, j, r;
2513
2514         if (amdgpu_emu_mode == 1)
2515                 return 0;
2516
2517         for (j = 0; j < adev->num_ip_blocks; j++) {
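                /* gate in IP discovery order, ungate in reverse order */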
2518                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2519                 if (!adev->ip_blocks[i].status.late_initialized)
2520                         continue;
2521                 /* skip PG for GFX on S0ix */
2522                 if (adev->in_s0ix &&
2523                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2524                         continue;
2525                 /* skip PG for VCE/UVD, it's handled specially */
2526                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2527                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2528                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2529                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2530                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2531                         /* enable powergating to save power */
2532                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2533                                                                                         state);
2534                         if (r) {
2535                                 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
2536                                           adev->ip_blocks[i].version->funcs->name, r);
2537                                 return r;
2538                         }
2539                 }
2540         }
2541         return 0;
2542 }
2543
2544 static int amdgpu_device_enable_mgpu_fan_boost(void)
2545 {
2546         struct amdgpu_gpu_instance *gpu_ins;
2547         struct amdgpu_device *adev;
2548         int i, ret = 0;
2549
2550         mutex_lock(&mgpu_info.mutex);
2551
2552         /*
2553          * MGPU fan boost feature should be enabled
2554          * only when there are two or more dGPUs in
2555          * the system
2556          */
2557         if (mgpu_info.num_dgpu < 2)
2558                 goto out;
2559
2560         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2561                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2562                 adev = gpu_ins->adev;
2563                 if (!(adev->flags & AMD_IS_APU) &&
2564                     !gpu_ins->mgpu_fan_enabled) {
2565                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2566                         if (ret)
2567                                 break;
2568
2569                         gpu_ins->mgpu_fan_enabled = 1;
2570                 }
2571         }
2572
2573 out:
2574         mutex_unlock(&mgpu_info.mutex);
2575
2576         return ret;
2577 }
2578
2579 /**
2580  * amdgpu_device_ip_late_init - run late init for hardware IPs
2581  *
2582  * @adev: amdgpu_device pointer
2583  *
2584  * Late initialization pass for hardware IPs.  The list of all the hardware
2585  * IPs that make up the asic is walked and the late_init callbacks are run.
2586  * late_init covers any special initialization that an IP requires
2587  * after all of them have been initialized or something that needs to happen
2588  * late in the init process.
2589  * Returns 0 on success, negative error code on failure.
2590  */
2591 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2592 {
2593         struct amdgpu_gpu_instance *gpu_instance;
2594         int i = 0, r;
2595
2596         for (i = 0; i < adev->num_ip_blocks; i++) {
2597                 if (!adev->ip_blocks[i].status.hw)
2598                         continue;
2599                 if (adev->ip_blocks[i].version->funcs->late_init) {
2600                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2601                         if (r) {
2602                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2603                                           adev->ip_blocks[i].version->funcs->name, r);
2604                                 return r;
2605                         }
2606                 }
2607                 adev->ip_blocks[i].status.late_initialized = true;
2608         }
2609
2610         amdgpu_ras_set_error_query_ready(adev, true);
2611
2612         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2613         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2614
2615         amdgpu_device_fill_reset_magic(adev);
2616
2617         r = amdgpu_device_enable_mgpu_fan_boost();
2618         if (r)
2619                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2620
2621         /* For XGMI + passthrough configuration on arcturus, enable light SBR */
2622         if (adev->asic_type == CHIP_ARCTURUS &&
2623             amdgpu_passthrough(adev) &&
2624             adev->gmc.xgmi.num_physical_nodes > 1)
2625                 smu_set_light_sbr(&adev->smu, true);
2626
2627         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2628                 mutex_lock(&mgpu_info.mutex);
2629
2630                 /*
2631                  * Reset device p-state to low, as it boots in the high p-state.
2632                  *
2633                  * This should be performed only after all devices from the same
2634                  * hive get initialized.
2635                  *
2636                  * However, the number of devices in the hive is not known in
2637                  * advance, as it is counted one by one during device init.
2638                  *
2639                  * So we wait until all XGMI interlinked devices are initialized.
2640                  * This may bring some delays as those devices may come from
2641                  * different hives. But that should be OK.
2642                  */
2643                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2644                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2645                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2646                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2647                                         continue;
2648
2649                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2650                                                 AMDGPU_XGMI_PSTATE_MIN);
2651                                 if (r) {
2652                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2653                                         break;
2654                                 }
2655                         }
2656                 }
2657
2658                 mutex_unlock(&mgpu_info.mutex);
2659         }
2660
2661         return 0;
2662 }
2663
2664 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2665 {
2666         int i, r;
2667
2668         for (i = 0; i < adev->num_ip_blocks; i++) {
2669                 if (!adev->ip_blocks[i].version->funcs->early_fini)
2670                         continue;
2671
2672                 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2673                 if (r) {
2674                         DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2675                                   adev->ip_blocks[i].version->funcs->name, r);
2676                 }
2677         }
2678
2679         amdgpu_amdkfd_suspend(adev, false);
2680
2681         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2682         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2683
2684         /* need to disable SMC first */
2685         for (i = 0; i < adev->num_ip_blocks; i++) {
2686                 if (!adev->ip_blocks[i].status.hw)
2687                         continue;
2688                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2689                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2690                         /* XXX handle errors */
2691                         if (r) {
2692                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2693                                           adev->ip_blocks[i].version->funcs->name, r);
2694                         }
2695                         adev->ip_blocks[i].status.hw = false;
2696                         break;
2697                 }
2698         }
2699
2700         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2701                 if (!adev->ip_blocks[i].status.hw)
2702                         continue;
2703
2704                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2705                 /* XXX handle errors */
2706                 if (r) {
2707                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2708                                   adev->ip_blocks[i].version->funcs->name, r);
2709                 }
2710
2711                 adev->ip_blocks[i].status.hw = false;
2712         }
2713
2714         if (amdgpu_sriov_vf(adev)) {
2715                 if (amdgpu_virt_release_full_gpu(adev, false))
2716                         DRM_ERROR("failed to release exclusive mode on fini\n");
2717         }
2718
2719         return 0;
2720 }
2721
2722 /**
2723  * amdgpu_device_ip_fini - run fini for hardware IPs
2724  *
2725  * @adev: amdgpu_device pointer
2726  *
2727  * Main teardown pass for hardware IPs.  The list of all the hardware
2728  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2729  * are run.  hw_fini tears down the hardware associated with each IP
2730  * and sw_fini tears down any software state associated with each IP.
2731  * Returns 0 on success, negative error code on failure.
2732  */
2733 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2734 {
2735         int i, r;
2736
2737         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2738                 amdgpu_virt_release_ras_err_handler_data(adev);
2739
2740         amdgpu_ras_pre_fini(adev);
2741
2742         if (adev->gmc.xgmi.num_physical_nodes > 1)
2743                 amdgpu_xgmi_remove_device(adev);
2744
2745         amdgpu_amdkfd_device_fini_sw(adev);
2746
2747         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2748                 if (!adev->ip_blocks[i].status.sw)
2749                         continue;
2750
2751                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2752                         amdgpu_ucode_free_bo(adev);
2753                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2754                         amdgpu_device_wb_fini(adev);
2755                         amdgpu_device_vram_scratch_fini(adev);
2756                         amdgpu_ib_pool_fini(adev);
2757                 }
2758
2759                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2760                 /* XXX handle errors */
2761                 if (r) {
2762                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2763                                   adev->ip_blocks[i].version->funcs->name, r);
2764                 }
2765                 adev->ip_blocks[i].status.sw = false;
2766                 adev->ip_blocks[i].status.valid = false;
2767         }
2768
2769         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2770                 if (!adev->ip_blocks[i].status.late_initialized)
2771                         continue;
2772                 if (adev->ip_blocks[i].version->funcs->late_fini)
2773                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2774                 adev->ip_blocks[i].status.late_initialized = false;
2775         }
2776
2777         amdgpu_ras_fini(adev);
2778
2779         return 0;
2780 }
2781
2782 /**
2783  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2784  *
2785  * @work: work_struct.
2786  */
2787 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2788 {
2789         struct amdgpu_device *adev =
2790                 container_of(work, struct amdgpu_device, delayed_init_work.work);
2791         int r;
2792
2793         r = amdgpu_ib_ring_tests(adev);
2794         if (r)
2795                 DRM_ERROR("ib ring test failed (%d).\n", r);
2796 }
2797
2798 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2799 {
2800         struct amdgpu_device *adev =
2801                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2802
2803         WARN_ON_ONCE(adev->gfx.gfx_off_state);
2804         WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2805
2806         if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2807                 adev->gfx.gfx_off_state = true;
2808 }
2809
2810 /**
2811  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2812  *
2813  * @adev: amdgpu_device pointer
2814  *
2815  * Main suspend function for hardware IPs.  The list of all the hardware
2816  * IPs that make up the asic is walked, clockgating is disabled and the
2817  * suspend callbacks are run.  suspend puts the hardware and software state
2818  * in each IP into a state suitable for suspend.
2819  * Returns 0 on success, negative error code on failure.
2820  */
2821 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2822 {
2823         int i, r;
2824
2825         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2826         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2827
2828         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2829                 if (!adev->ip_blocks[i].status.valid)
2830                         continue;
2831
2832                 /* displays are handled separately */
2833                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2834                         continue;
2835
2837                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2838                 /* XXX handle errors */
2839                 if (r) {
2840                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2841                                   adev->ip_blocks[i].version->funcs->name, r);
2842                         return r;
2843                 }
2844
2845                 adev->ip_blocks[i].status.hw = false;
2846         }
2847
2848         return 0;
2849 }
2850
2851 /**
2852  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2853  *
2854  * @adev: amdgpu_device pointer
2855  *
2856  * Main suspend function for hardware IPs.  The list of all the hardware
2857  * IPs that make up the asic is walked, clockgating is disabled and the
2858  * suspend callbacks are run.  suspend puts the hardware and software state
2859  * in each IP into a state suitable for suspend.
2860  * Returns 0 on success, negative error code on failure.
2861  */
2862 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2863 {
2864         int i, r;
2865
2866         if (adev->in_s0ix)
2867                 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
2868
2869         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2870                 if (!adev->ip_blocks[i].status.valid)
2871                         continue;
2872                 /* displays are handled in phase1 */
2873                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2874                         continue;
2875                 /* PSP lost connection when err_event_athub occurs */
2876                 if (amdgpu_ras_intr_triggered() &&
2877                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2878                         adev->ip_blocks[i].status.hw = false;
2879                         continue;
2880                 }
2881
2882                 /* skip unnecessary suspend if we have not initialized them yet */
2883                 if (adev->gmc.xgmi.pending_reset &&
2884                     !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2885                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2886                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2887                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2888                         adev->ip_blocks[i].status.hw = false;
2889                         continue;
2890                 }
2891
2892                 /* skip suspend of gfx and psp for S0ix
2893                  * gfx is in gfxoff state, so on resume it will exit gfxoff just
2894                  * like at runtime. PSP is also part of the always on hardware
2895                  * so no need to suspend it.
2896                  */
2897                 if (adev->in_s0ix &&
2898                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
2899                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
2900                         continue;
2901
2903                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2904                 /* XXX handle errors */
2905                 if (r) {
2906                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2907                                   adev->ip_blocks[i].version->funcs->name, r);
2908                 }
2909                 adev->ip_blocks[i].status.hw = false;
2910                 /* handle putting the SMC in the appropriate state */
2911                 if (!amdgpu_sriov_vf(adev)) {
2912                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2913                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2914                                 if (r) {
2915                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2916                                                         adev->mp1_state, r);
2917                                         return r;
2918                                 }
2919                         }
2920                 }
2921         }
2922
2923         return 0;
2924 }
2925
2926 /**
2927  * amdgpu_device_ip_suspend - run suspend for hardware IPs
2928  *
2929  * @adev: amdgpu_device pointer
2930  *
2931  * Main suspend function for hardware IPs.  The list of all the hardware
2932  * IPs that make up the asic is walked, clockgating is disabled and the
2933  * suspend callbacks are run.  suspend puts the hardware and software state
2934  * in each IP into a state suitable for suspend.
2935  * Returns 0 on success, negative error code on failure.
2936  */
2937 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2938 {
2939         int r;
2940
2941         if (amdgpu_sriov_vf(adev)) {
2942                 amdgpu_virt_fini_data_exchange(adev);
2943                 amdgpu_virt_request_full_gpu(adev, false);
2944         }
2945
2946         r = amdgpu_device_ip_suspend_phase1(adev);
2947         if (r)
2948                 return r;
2949         r = amdgpu_device_ip_suspend_phase2(adev);
2950
2951         if (amdgpu_sriov_vf(adev))
2952                 amdgpu_virt_release_full_gpu(adev, false);
2953
2954         return r;
2955 }
2956
2957 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2958 {
2959         int i, r;
2960
2961         static enum amd_ip_block_type ip_order[] = {
2962                 AMD_IP_BLOCK_TYPE_GMC,
2963                 AMD_IP_BLOCK_TYPE_COMMON,
2964                 AMD_IP_BLOCK_TYPE_PSP,
2965                 AMD_IP_BLOCK_TYPE_IH,
2966         };
2967
2968         for (i = 0; i < adev->num_ip_blocks; i++) {
2969                 int j;
2970                 struct amdgpu_ip_block *block;
2971
2972                 block = &adev->ip_blocks[i];
2973                 block->status.hw = false;
2974
2975                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
2976
2977                         if (block->version->type != ip_order[j] ||
2978                                 !block->status.valid)
2979                                 continue;
2980
2981                         r = block->version->funcs->hw_init(adev);
2982                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2983                         if (r)
2984                                 return r;
2985                         block->status.hw = true;
2986                 }
2987         }
2988
2989         return 0;
2990 }
2991
2992 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2993 {
2994         int i, r;
2995
2996         static enum amd_ip_block_type ip_order[] = {
2997                 AMD_IP_BLOCK_TYPE_SMC,
2998                 AMD_IP_BLOCK_TYPE_DCE,
2999                 AMD_IP_BLOCK_TYPE_GFX,
3000                 AMD_IP_BLOCK_TYPE_SDMA,
3001                 AMD_IP_BLOCK_TYPE_UVD,
3002                 AMD_IP_BLOCK_TYPE_VCE,
3003                 AMD_IP_BLOCK_TYPE_VCN
3004         };
3005
3006         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3007                 int j;
3008                 struct amdgpu_ip_block *block;
3009
3010                 for (j = 0; j < adev->num_ip_blocks; j++) {
3011                         block = &adev->ip_blocks[j];
3012
3013                         if (block->version->type != ip_order[i] ||
3014                                 !block->status.valid ||
3015                                 block->status.hw)
3016                                 continue;
3017
3018                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3019                                 r = block->version->funcs->resume(adev);
3020                         else
3021                                 r = block->version->funcs->hw_init(adev);
3022
3023                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3024                         if (r)
3025                                 return r;
3026                         block->status.hw = true;
3027                 }
3028         }
3029
3030         return 0;
3031 }
3032
3033 /**
3034  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3035  *
3036  * @adev: amdgpu_device pointer
3037  *
3038  * First resume function for hardware IPs.  The list of all the hardware
3039  * IPs that make up the asic is walked and the resume callbacks are run for
3040  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3041  * after a suspend and updates the software state as necessary.  This
3042  * function is also used for restoring the GPU after a GPU reset.
3043  * Returns 0 on success, negative error code on failure.
3044  */
3045 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3046 {
3047         int i, r;
3048
3049         for (i = 0; i < adev->num_ip_blocks; i++) {
3050                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3051                         continue;
3052                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3053                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3054                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
3055
3056                         r = adev->ip_blocks[i].version->funcs->resume(adev);
3057                         if (r) {
3058                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
3059                                           adev->ip_blocks[i].version->funcs->name, r);
3060                                 return r;
3061                         }
3062                         adev->ip_blocks[i].status.hw = true;
3063                 }
3064         }
3065
3066         return 0;
3067 }
3068
3069 /**
3070  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3071  *
3072  * @adev: amdgpu_device pointer
3073  *
3074  * Second resume function for hardware IPs.  The list of all the hardware
3075  * IPs that make up the asic is walked and the resume callbacks are run for
3076  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3077  * functional state after a suspend and updates the software state as
3078  * necessary.  This function is also used for restoring the GPU after a GPU
3079  * reset.
3080  * Returns 0 on success, negative error code on failure.
3081  */
3082 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3083 {
3084         int i, r;
3085
3086         for (i = 0; i < adev->num_ip_blocks; i++) {
3087                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3088                         continue;
3089                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3090                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3091                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3092                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3093                         continue;
3094                 r = adev->ip_blocks[i].version->funcs->resume(adev);
3095                 if (r) {
3096                         DRM_ERROR("resume of IP block <%s> failed %d\n",
3097                                   adev->ip_blocks[i].version->funcs->name, r);
3098                         return r;
3099                 }
3100                 adev->ip_blocks[i].status.hw = true;
3101         }
3102
3103         return 0;
3104 }
3105
3106 /**
3107  * amdgpu_device_ip_resume - run resume for hardware IPs
3108  *
3109  * @adev: amdgpu_device pointer
3110  *
3111  * Main resume function for hardware IPs.  The hardware IPs
3112  * are split into two resume functions because they are
3113  * also used in recovering from a GPU reset and some additional
3114  * steps need to be taken between them.  In this case (S3/S4) they are
3115  * run sequentially.
3116  * Returns 0 on success, negative error code on failure.
3117  */
3118 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3119 {
3120         int r;
3121
3122         r = amdgpu_amdkfd_resume_iommu(adev);
3123         if (r)
3124                 return r;
3125
3126         r = amdgpu_device_ip_resume_phase1(adev);
3127         if (r)
3128                 return r;
3129
3130         r = amdgpu_device_fw_loading(adev);
3131         if (r)
3132                 return r;
3133
3134         r = amdgpu_device_ip_resume_phase2(adev);
3135
3136         return r;
3137 }
3138
3139 /**
3140  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3141  *
3142  * @adev: amdgpu_device pointer
3143  *
3144  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3145  */
3146 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3147 {
3148         if (amdgpu_sriov_vf(adev)) {
3149                 if (adev->is_atom_fw) {
3150                         if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3151                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3152                 } else {
3153                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3154                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3155                 }
3156
3157                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3158                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3159         }
3160 }
3161
3162 /**
3163  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3164  *
3165  * @asic_type: AMD asic type
3166  *
3167  * Check if there is DC (new modesetting infrastructure) support for an asic.
3168  * Returns true if DC has support, false if not.
3169  */
3170 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3171 {
3172         switch (asic_type) {
3173 #if defined(CONFIG_DRM_AMD_DC)
3174 #if defined(CONFIG_DRM_AMD_DC_SI)
3175         case CHIP_TAHITI:
3176         case CHIP_PITCAIRN:
3177         case CHIP_VERDE:
3178         case CHIP_OLAND:
3179 #endif
3180         case CHIP_BONAIRE:
3181         case CHIP_KAVERI:
3182         case CHIP_KABINI:
3183         case CHIP_MULLINS:
3184                 /*
3185                  * We have systems in the wild with these ASICs that require
3186                  * LVDS and VGA support which is not supported with DC.
3187                  *
3188                  * Fallback to the non-DC driver here by default so as not to
3189                  * cause regressions.
3190                  */
3191                 return amdgpu_dc > 0;
3192         case CHIP_HAWAII:
3193         case CHIP_CARRIZO:
3194         case CHIP_STONEY:
3195         case CHIP_POLARIS10:
3196         case CHIP_POLARIS11:
3197         case CHIP_POLARIS12:
3198         case CHIP_VEGAM:
3199         case CHIP_TONGA:
3200         case CHIP_FIJI:
3201         case CHIP_VEGA10:
3202         case CHIP_VEGA12:
3203         case CHIP_VEGA20:
3204 #if defined(CONFIG_DRM_AMD_DC_DCN)
3205         case CHIP_RAVEN:
3206         case CHIP_NAVI10:
3207         case CHIP_NAVI14:
3208         case CHIP_NAVI12:
3209         case CHIP_RENOIR:
3210         case CHIP_CYAN_SKILLFISH:
3211         case CHIP_SIENNA_CICHLID:
3212         case CHIP_NAVY_FLOUNDER:
3213         case CHIP_DIMGREY_CAVEFISH:
3214         case CHIP_BEIGE_GOBY:
3215         case CHIP_VANGOGH:
3216         case CHIP_YELLOW_CARP:
3217 #endif
3218         default:
3219                 return amdgpu_dc != 0;
3220 #else
3221         default:
3222                 if (amdgpu_dc > 0)
3223                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3224                                          "but isn't supported by ASIC, ignoring\n");
3225                 return false;
3226 #endif
3227         }
3228 }
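/*
 * Illustrative usage (editorial example; the parameter itself is defined
 * in amdgpu_drv.c): the amdgpu_dc checks above are driven by the amdgpu.dc
 * module parameter.  On the legacy LVDS/VGA ASICs listed first, DC is
 * used only when explicitly requested:
 *
 *   modprobe amdgpu dc=1    (opt in to DC on e.g. BONAIRE/KAVERI)
 *   modprobe amdgpu dc=0    (force the non-DC path where DC is the default)
 */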
3229
3230 /**
3231  * amdgpu_device_has_dc_support - check if dc is supported
3232  *
3233  * @adev: amdgpu_device pointer
3234  *
3235  * Returns true for supported, false for not supported
3236  */
3237 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3238 {
3239         if (amdgpu_sriov_vf(adev) ||
3240             adev->enable_virtual_display ||
3241             (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3242                 return false;
3243
3244         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3245 }
3246
3247 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3248 {
3249         struct amdgpu_device *adev =
3250                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3251         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3252
3253         /* It's a bug to not have a hive within this function */
3254         if (WARN_ON(!hive))
3255                 return;
3256
3257         /*
3258          * Use task barrier to synchronize all xgmi reset works across the
3259          * hive. task_barrier_enter and task_barrier_exit will block
3260          * until all the threads running the xgmi reset works reach
3261          * those points. task_barrier_full will do both blocks.
3262          */
3263         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3264
3265                 task_barrier_enter(&hive->tb);
3266                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3267
3268                 if (adev->asic_reset_res)
3269                         goto fail;
3270
3271                 task_barrier_exit(&hive->tb);
3272                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3273
3274                 if (adev->asic_reset_res)
3275                         goto fail;
3276
3277                 if (adev->mmhub.ras_funcs &&
3278                     adev->mmhub.ras_funcs->reset_ras_error_count)
3279                         adev->mmhub.ras_funcs->reset_ras_error_count(adev);
3280         } else {
3281
3282                 task_barrier_full(&hive->tb);
3283                 adev->asic_reset_res = amdgpu_asic_reset(adev);
3284         }
3285
3286 fail:
3287         if (adev->asic_reset_res)
3288                 DRM_WARN("ASIC reset failed with error %d for drm dev %s",
3289                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3290         amdgpu_put_xgmi_hive(hive);
3291 }
3292
3293 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3294 {
3295         char *input = amdgpu_lockup_timeout;
3296         char *timeout_setting = NULL;
3297         int index = 0;
3298         long timeout;
3299         int ret = 0;
3300
3301         /*
3302          * By default the timeout is 10000 ms for non-compute jobs
3303          * and 60000 ms for compute jobs.
3304          * Under SR-IOV the compute timeout is 60000 ms only in
3305          * one-VF (pp_one_vf) mode; otherwise it is 10000 ms.
3306          */
3307         adev->gfx_timeout = msecs_to_jiffies(10000);
3308         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3309         if (amdgpu_sriov_vf(adev))
3310                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3311                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3312         else
3313                 adev->compute_timeout = msecs_to_jiffies(60000);
3314
3315         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3316                 while ((timeout_setting = strsep(&input, ",")) &&
3317                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3318                         ret = kstrtol(timeout_setting, 0, &timeout);
3319                         if (ret)
3320                                 return ret;
3321
3322                         if (timeout == 0) {
3323                                 index++;
3324                                 continue;
3325                         } else if (timeout < 0) {
3326                                 timeout = MAX_SCHEDULE_TIMEOUT;
3327                         } else {
3328                                 timeout = msecs_to_jiffies(timeout);
3329                         }
3330
3331                         switch (index++) {
3332                         case 0:
3333                                 adev->gfx_timeout = timeout;
3334                                 break;
3335                         case 1:
3336                                 adev->compute_timeout = timeout;
3337                                 break;
3338                         case 2:
3339                                 adev->sdma_timeout = timeout;
3340                                 break;
3341                         case 3:
3342                                 adev->video_timeout = timeout;
3343                                 break;
3344                         default:
3345                                 break;
3346                         }
3347                 }
3348                 /*
3349                  * There is only one value specified and
3350                  * it should apply to all non-compute jobs.
3351                  */
3352                 if (index == 1) {
3353                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3354                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3355                                 adev->compute_timeout = adev->gfx_timeout;
3356                 }
3357         }
3358
3359         return ret;
3360 }
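/*
 * Illustrative example (editorial note; the values are arbitrary): the
 * amdgpu.lockup_timeout parameter parsed above takes up to four
 * comma-separated values in ms, consumed in the order gfx, compute,
 * sdma, video.  A value of 0 keeps the default and a negative value
 * disables the timeout (MAX_SCHEDULE_TIMEOUT):
 *
 *   amdgpu.lockup_timeout=10000,60000,5000,5000
 *       gfx = 10 s, compute = 60 s, sdma = 5 s, video = 5 s
 *   amdgpu.lockup_timeout=10000
 *       a single value applies to all non-compute queues (and also to
 *       compute under SR-IOV or passthrough)
 */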
3361
3362 static const struct attribute *amdgpu_dev_attributes[] = {
3363         &dev_attr_product_name.attr,
3364         &dev_attr_product_number.attr,
3365         &dev_attr_serial_number.attr,
3366         &dev_attr_pcie_replay_count.attr,
3367         NULL
3368 };
3369
3370 /**
3371  * amdgpu_device_init - initialize the driver
3372  *
3373  * @adev: amdgpu_device pointer
3374  * @flags: driver flags
3375  *
3376  * Initializes the driver info and hw (all asics).
3377  * Returns 0 for success or an error on failure.
3378  * Called at driver startup.
3379  */
3380 int amdgpu_device_init(struct amdgpu_device *adev,
3381                        uint32_t flags)
3382 {
3383         struct drm_device *ddev = adev_to_drm(adev);
3384         struct pci_dev *pdev = adev->pdev;
3385         int r, i;
3386         bool px = false;
3387         u32 max_MBps;
3388
3389         adev->shutdown = false;
3390         adev->flags = flags;
3391
3392         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3393                 adev->asic_type = amdgpu_force_asic_type;
3394         else
3395                 adev->asic_type = flags & AMD_ASIC_MASK;
3396
3397         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3398         if (amdgpu_emu_mode == 1)
3399                 adev->usec_timeout *= 10;
3400         adev->gmc.gart_size = 512 * 1024 * 1024;
3401         adev->accel_working = false;
3402         adev->num_rings = 0;
3403         adev->mman.buffer_funcs = NULL;
3404         adev->mman.buffer_funcs_ring = NULL;
3405         adev->vm_manager.vm_pte_funcs = NULL;
3406         adev->vm_manager.vm_pte_num_scheds = 0;
3407         adev->gmc.gmc_funcs = NULL;
3408         adev->harvest_ip_mask = 0x0;
3409         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3410         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3411
3412         adev->smc_rreg = &amdgpu_invalid_rreg;
3413         adev->smc_wreg = &amdgpu_invalid_wreg;
3414         adev->pcie_rreg = &amdgpu_invalid_rreg;
3415         adev->pcie_wreg = &amdgpu_invalid_wreg;
3416         adev->pciep_rreg = &amdgpu_invalid_rreg;
3417         adev->pciep_wreg = &amdgpu_invalid_wreg;
3418         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3419         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3420         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3421         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3422         adev->didt_rreg = &amdgpu_invalid_rreg;
3423         adev->didt_wreg = &amdgpu_invalid_wreg;
3424         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3425         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3426         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3427         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3428
3429         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3430                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3431                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3432
3433         /* mutex initializations are all done here so we
3434          * can call functions without running into locking issues */
3435         mutex_init(&adev->firmware.mutex);
3436         mutex_init(&adev->pm.mutex);
3437         mutex_init(&adev->gfx.gpu_clock_mutex);
3438         mutex_init(&adev->srbm_mutex);
3439         mutex_init(&adev->gfx.pipe_reserve_mutex);
3440         mutex_init(&adev->gfx.gfx_off_mutex);
3441         mutex_init(&adev->grbm_idx_mutex);
3442         mutex_init(&adev->mn_lock);
3443         mutex_init(&adev->virt.vf_errors.lock);
3444         hash_init(adev->mn_hash);
3445         atomic_set(&adev->in_gpu_reset, 0);
3446         init_rwsem(&adev->reset_sem);
3447         mutex_init(&adev->psp.mutex);
3448         mutex_init(&adev->notifier_lock);
3449
3450         r = amdgpu_device_init_apu_flags(adev);
3451         if (r)
3452                 return r;
3453
3454         r = amdgpu_device_check_arguments(adev);
3455         if (r)
3456                 return r;
3457
3458         spin_lock_init(&adev->mmio_idx_lock);
3459         spin_lock_init(&adev->smc_idx_lock);
3460         spin_lock_init(&adev->pcie_idx_lock);
3461         spin_lock_init(&adev->uvd_ctx_idx_lock);
3462         spin_lock_init(&adev->didt_idx_lock);
3463         spin_lock_init(&adev->gc_cac_idx_lock);
3464         spin_lock_init(&adev->se_cac_idx_lock);
3465         spin_lock_init(&adev->audio_endpt_idx_lock);
3466         spin_lock_init(&adev->mm_stats.lock);
3467
3468         INIT_LIST_HEAD(&adev->shadow_list);
3469         mutex_init(&adev->shadow_list_lock);
3470
3471         INIT_LIST_HEAD(&adev->reset_list);
3472
3473         INIT_DELAYED_WORK(&adev->delayed_init_work,
3474                           amdgpu_device_delayed_init_work_handler);
3475         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3476                           amdgpu_device_delay_enable_gfx_off);
3477
3478         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3479
3480         adev->gfx.gfx_off_req_count = 1;
3481         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3482
3483         atomic_set(&adev->throttling_logging_enabled, 1);
3484         /*
3485          * If throttling continues, logging will be performed every minute
3486          * to avoid log flooding. "-1" is subtracted since the thermal
3487          * throttling interrupt comes every second. Thus, the total logging
3488          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3489          * for throttling interrupt) = 60 seconds.
3490          */
3491         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3492         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3493
3494         /* Registers mapping */
3495         /* TODO: block userspace mapping of io register */
3496         if (adev->asic_type >= CHIP_BONAIRE) {
3497                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3498                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3499         } else {
3500                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3501                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3502         }
3503
3504         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3505         if (adev->rmmio == NULL) {
3506                 return -ENOMEM;
3507         }
3508         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3509         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3510
3511         amdgpu_device_get_pcie_info(adev);
3512
3513         if (amdgpu_mcbp)
3514                 DRM_INFO("MCBP is enabled\n");
3515
3516         if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3517                 adev->enable_mes = true;
3518
3519         /* detect hw virtualization here */
3520         amdgpu_detect_virtualization(adev);
3521
3522         r = amdgpu_device_get_job_timeout_settings(adev);
3523         if (r) {
3524                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3525                 return r;
3526         }
3527
3528         /* early init functions */
3529         r = amdgpu_device_ip_early_init(adev);
3530         if (r)
3531                 return r;
3532
3533         /* enable PCIE atomic ops */
3534         if (amdgpu_sriov_vf(adev))
3535                 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3536                         adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_enabled_flags ==
3537                         (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3538         else
3539                 adev->have_atomics_support =
3540                         !pci_enable_atomic_ops_to_root(adev->pdev,
3541                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3542                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3543         if (!adev->have_atomics_support)
3544                 dev_info(adev->dev, "PCIe atomic ops are not supported\n");
3545
3546         /* doorbell bar mapping and doorbell index init */
3547         amdgpu_device_doorbell_init(adev);
3548
3549         if (amdgpu_emu_mode == 1) {
3550                 /* post the asic in emulation mode */
3551                 emu_soc_asic_init(adev);
3552                 goto fence_driver_init;
3553         }
3554
3555         amdgpu_reset_init(adev);
3556
3557         /* detect whether we are running with an SR-IOV vBIOS */
3558         amdgpu_device_detect_sriov_bios(adev);
3559
3560         /* check if we need to reset the asic
3561          *  E.g., driver was not cleanly unloaded previously, etc.
3562          */
3563         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3564                 if (adev->gmc.xgmi.num_physical_nodes) {
3565                         dev_info(adev->dev, "Pending hive reset.\n");
3566                         adev->gmc.xgmi.pending_reset = true;
3567                         /* Only init the blocks required for the SMU to handle the reset */
3568                         for (i = 0; i < adev->num_ip_blocks; i++) {
3569                                 if (!adev->ip_blocks[i].status.valid)
3570                                         continue;
3571                                 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3572                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3573                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3574                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3575                                         DRM_DEBUG("IP %s disabled for hw_init.\n",
3576                                                 adev->ip_blocks[i].version->funcs->name);
3577                                         adev->ip_blocks[i].status.hw = true;
3578                                 }
3579                         }
3580                 } else {
3581                         r = amdgpu_asic_reset(adev);
3582                         if (r) {
3583                                 dev_err(adev->dev, "asic reset on init failed\n");
3584                                 goto failed;
3585                         }
3586                 }
3587         }
3588
3589         pci_enable_pcie_error_reporting(adev->pdev);
3590
3591         /* Post card if necessary */
3592         if (amdgpu_device_need_post(adev)) {
3593                 if (!adev->bios) {
3594                         dev_err(adev->dev, "no vBIOS found\n");
3595                         r = -EINVAL;
3596                         goto failed;
3597                 }
3598                 DRM_INFO("GPU posting now...\n");
3599                 r = amdgpu_device_asic_init(adev);
3600                 if (r) {
3601                         dev_err(adev->dev, "gpu post error!\n");
3602                         goto failed;
3603                 }
3604         }
3605
3606         if (adev->is_atom_fw) {
3607                 /* Initialize clocks */
3608                 r = amdgpu_atomfirmware_get_clock_info(adev);
3609                 if (r) {
3610                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3611                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3612                         goto failed;
3613                 }
3614         } else {
3615                 /* Initialize clocks */
3616                 r = amdgpu_atombios_get_clock_info(adev);
3617                 if (r) {
3618                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3619                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3620                         goto failed;
3621                 }
3622                 /* init i2c buses */
3623                 if (!amdgpu_device_has_dc_support(adev))
3624                         amdgpu_atombios_i2c_init(adev);
3625         }
3626
3627 fence_driver_init:
3628         /* Fence driver */
3629         r = amdgpu_fence_driver_sw_init(adev);
3630         if (r) {
3631                 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3632                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3633                 goto failed;
3634         }
3635
3636         /* init the mode config */
3637         drm_mode_config_init(adev_to_drm(adev));
3638
3639         r = amdgpu_device_ip_init(adev);
3640         if (r) {
3641                 /* failed in exclusive mode due to timeout */
3642                 if (amdgpu_sriov_vf(adev) &&
3643                     !amdgpu_sriov_runtime(adev) &&
3644                     amdgpu_virt_mmio_blocked(adev) &&
3645                     !amdgpu_virt_wait_reset(adev)) {
3646                         dev_err(adev->dev, "VF exclusive mode timeout\n");
3647                         /* Don't send request since VF is inactive. */
3648                         adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3649                         adev->virt.ops = NULL;
3650                         r = -EAGAIN;
3651                         goto release_ras_con;
3652                 }
3653                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3654                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3655                 goto release_ras_con;
3656         }
3657
3658         amdgpu_fence_driver_hw_init(adev);
3659
3660         dev_info(adev->dev,
3661                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3662                         adev->gfx.config.max_shader_engines,
3663                         adev->gfx.config.max_sh_per_se,
3664                         adev->gfx.config.max_cu_per_sh,
3665                         adev->gfx.cu_info.number);
3666
3667         adev->accel_working = true;
3668
3669         amdgpu_vm_check_compute_bug(adev);
3670
3671         /* Initialize the buffer migration limit. */
3672         if (amdgpu_moverate >= 0)
3673                 max_MBps = amdgpu_moverate;
3674         else
3675                 max_MBps = 8; /* Allow 8 MB/s. */
3676         /* Get a log2 for easy divisions. */
3677         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
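	/* Worked example (editorial note): the default max_MBps of 8 gives
	 * log2_max_MBps = ilog2(8) = 3, so dividing a byte count by the
	 * rate becomes a right shift by 3.
	 */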
3678
3679         amdgpu_fbdev_init(adev);
3680
3681         r = amdgpu_pm_sysfs_init(adev);
3682         if (r) {
3683                 adev->pm_sysfs_en = false;
3684                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3685         } else
3686                 adev->pm_sysfs_en = true;
3687
3688         r = amdgpu_ucode_sysfs_init(adev);
3689         if (r) {
3690                 adev->ucode_sysfs_en = false;
3691                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3692         } else
3693                 adev->ucode_sysfs_en = true;
3694
3695         if (amdgpu_testing & 1) {
3696                 if (adev->accel_working)
3697                         amdgpu_test_moves(adev);
3698                 else
3699                         DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3700         }
3701         if (amdgpu_benchmarking) {
3702                 if (adev->accel_working)
3703                         amdgpu_benchmark(adev, amdgpu_benchmarking);
3704                 else
3705                         DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3706         }
3707
3708         /*
3709          * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3710          * Otherwise the mgpu fan boost feature will be skipped because
3711          * the gpu instance count would be too low.
3712          */
3713         amdgpu_register_gpu_instance(adev);
3714
3715         /* enable clockgating, etc. after ib tests, etc. since some blocks require
3716          * explicit gating rather than handling it automatically.
3717          */
3718         if (!adev->gmc.xgmi.pending_reset) {
3719                 r = amdgpu_device_ip_late_init(adev);
3720                 if (r) {
3721                         dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3722                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3723                         goto release_ras_con;
3724                 }
3725                 /* must succeed. */
3726                 amdgpu_ras_resume(adev);
3727                 queue_delayed_work(system_wq, &adev->delayed_init_work,
3728                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3729         }
3730
3731         if (amdgpu_sriov_vf(adev))
3732                 flush_delayed_work(&adev->delayed_init_work);
3733
3734         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3735         if (r)
3736                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3737
3738         r = IS_ENABLED(CONFIG_PERF_EVENTS) ?
3739                 amdgpu_pmu_init(adev) : 0;
3740         if (r)
3741                 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3742
3743         /* cache the PCI config space so it can be restored after a sudden PCI error */
3744         if (amdgpu_device_cache_pci_state(adev->pdev))
3745                 pci_restore_state(pdev);
3746
3747         /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3748         /* this will fail for cards that aren't VGA class devices, just
3749          * ignore it */
3750         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3751                 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3752
3753         if (amdgpu_device_supports_px(ddev)) {
3754                 px = true;
3755                 vga_switcheroo_register_client(adev->pdev,
3756                                                &amdgpu_switcheroo_ops, px);
3757                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3758         }
3759
3760         if (adev->gmc.xgmi.pending_reset)
3761                 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3762                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3763
3764         return 0;
3765
3766 release_ras_con:
3767         amdgpu_release_ras_context(adev);
3768
3769 failed:
3770         amdgpu_vf_error_trans_all(adev);
3771
3772         return r;
3773 }
3774
3775 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3776 {
3777         /* Clear all CPU mappings pointing to this device */
3778         unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3779
3780         /* Unmap all mapped bars - Doorbell, registers and VRAM */
3781         amdgpu_device_doorbell_fini(adev);
3782
3783         iounmap(adev->rmmio);
3784         adev->rmmio = NULL;
3785         if (adev->mman.aper_base_kaddr)
3786                 iounmap(adev->mman.aper_base_kaddr);
3787         adev->mman.aper_base_kaddr = NULL;
3788
3789         /* Memory manager related */
3790         if (!adev->gmc.xgmi.connected_to_cpu) {
3791                 arch_phys_wc_del(adev->gmc.vram_mtrr);
3792                 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3793         }
3794 }
3795
3796 /**
3797  * amdgpu_device_fini_hw - tear down the driver
3798  *
3799  * @adev: amdgpu_device pointer
3800  *
3801  * Tear down the driver info (all asics).
3802  * Called at driver shutdown.
3803  */
3804 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3805 {
3806         dev_info(adev->dev, "amdgpu: finishing device.\n");
3807         flush_delayed_work(&adev->delayed_init_work);
3808         if (adev->mman.initialized) {
3809                 flush_delayed_work(&adev->mman.bdev.wq);
3810                 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3811         }
3812         adev->shutdown = true;
3813
3814         /* make sure IB tests are finished before entering exclusive mode
3815          * to avoid preemption on IB test
3816          */
3817         if (amdgpu_sriov_vf(adev)) {
3818                 amdgpu_virt_request_full_gpu(adev, false);
3819                 amdgpu_virt_fini_data_exchange(adev);
3820         }
3821
3822         /* disable all interrupts */
3823         amdgpu_irq_disable_all(adev);
3824         if (adev->mode_info.mode_config_initialized) {
3825                 if (!amdgpu_device_has_dc_support(adev))
3826                         drm_helper_force_disable_all(adev_to_drm(adev));
3827                 else
3828                         drm_atomic_helper_shutdown(adev_to_drm(adev));
3829         }
3830         amdgpu_fence_driver_hw_fini(adev);
3831
3832         if (adev->pm_sysfs_en)
3833                 amdgpu_pm_sysfs_fini(adev);
3834         if (adev->ucode_sysfs_en)
3835                 amdgpu_ucode_sysfs_fini(adev);
3836         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3837
3838         amdgpu_fbdev_fini(adev);
3839
3840         amdgpu_irq_fini_hw(adev);
3841
3842         amdgpu_device_ip_fini_early(adev);
3843
3844         ttm_device_clear_dma_mappings(&adev->mman.bdev);
3845
3846         amdgpu_gart_dummy_page_fini(adev);
3847
3848         amdgpu_device_unmap_mmio(adev);
3849 }
3850
3851 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3852 {
3853         amdgpu_device_ip_fini(adev);
3854         amdgpu_fence_driver_sw_fini(adev);
3855         release_firmware(adev->firmware.gpu_info_fw);
3856         adev->firmware.gpu_info_fw = NULL;
3857         adev->accel_working = false;
3858
3859         amdgpu_reset_fini(adev);
3860
3861         /* free i2c buses */
3862         if (!amdgpu_device_has_dc_support(adev))
3863                 amdgpu_i2c_fini(adev);
3864
3865         if (amdgpu_emu_mode != 1)
3866                 amdgpu_atombios_fini(adev);
3867
3868         kfree(adev->bios);
3869         adev->bios = NULL;
3870         if (amdgpu_device_supports_px(adev_to_drm(adev))) {
3871                 vga_switcheroo_unregister_client(adev->pdev);
3872                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3873         }
3874         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3875                 vga_client_unregister(adev->pdev);
3876
3877         if (IS_ENABLED(CONFIG_PERF_EVENTS))
3878                 amdgpu_pmu_fini(adev);
3879         if (adev->mman.discovery_bin)
3880                 amdgpu_discovery_fini(adev);
3881
3882         kfree(adev->pci_state);
3883 }
3885
3886
3887 /*
3888  * Suspend & resume.
3889  */
3890 /**
3891  * amdgpu_device_suspend - initiate device suspend
3892  *
3893  * @dev: drm dev pointer
3894  * @fbcon: notify the fbdev of suspend
3895  *
3896  * Puts the hw in the suspend state (all asics).
3897  * Returns 0 for success or an error on failure.
3898  * Called at driver suspend.
3899  */
3900 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3901 {
3902         struct amdgpu_device *adev = drm_to_adev(dev);
3903
3904         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3905                 return 0;
3906
3907         adev->in_suspend = true;
3908
3909         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
3910                 DRM_WARN("smart shift update failed\n");
3911
3912         drm_kms_helper_poll_disable(dev);
3913
3914         if (fbcon)
3915                 amdgpu_fbdev_set_suspend(adev, 1);
3916
3917         cancel_delayed_work_sync(&adev->delayed_init_work);
3918
3919         amdgpu_ras_suspend(adev);
3920
3921         amdgpu_device_ip_suspend_phase1(adev);
3922
3923         if (!adev->in_s0ix)
3924                 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
3925
3926         /* evict vram memory */
3927         amdgpu_bo_evict_vram(adev);
3928
3929         amdgpu_fence_driver_hw_fini(adev);
3930
3931         amdgpu_device_ip_suspend_phase2(adev);
3932         /* evict remaining vram memory
3933          * This second call to evict vram is to evict the gart page table
3934          * using the CPU.
3935          */
3936         amdgpu_bo_evict_vram(adev);
3937
3938         return 0;
3939 }
3940
3941 /**
3942  * amdgpu_device_resume - initiate device resume
3943  *
3944  * @dev: drm dev pointer
3945  * @fbcon: notify the fbdev of resume
3946  *
3947  * Bring the hw back to operating state (all asics).
3948  * Returns 0 for success or an error on failure.
3949  * Called at driver resume.
3950  */
3951 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3952 {
3953         struct amdgpu_device *adev = drm_to_adev(dev);
3954         int r = 0;
3955
3956         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3957                 return 0;
3958
3959         if (adev->in_s0ix)
3960                 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
3961
3962         /* post card */
3963         if (amdgpu_device_need_post(adev)) {
3964                 r = amdgpu_device_asic_init(adev);
3965                 if (r)
3966                         dev_err(adev->dev, "amdgpu asic init failed\n");
3967         }
3968
3969         r = amdgpu_device_ip_resume(adev);
3970         if (r) {
3971                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
3972                 return r;
3973         }
3974         amdgpu_fence_driver_hw_init(adev);
3975
3976         r = amdgpu_device_ip_late_init(adev);
3977         if (r)
3978                 return r;
3979
3980         queue_delayed_work(system_wq, &adev->delayed_init_work,
3981                            msecs_to_jiffies(AMDGPU_RESUME_MS));
3982
3983         if (!adev->in_s0ix) {
3984                 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
3985                 if (r)
3986                         return r;
3987         }
3988
3989         /* Make sure IB tests flushed */
3990         flush_delayed_work(&adev->delayed_init_work);
3991
3992         if (fbcon)
3993                 amdgpu_fbdev_set_suspend(adev, 0);
3994
3995         drm_kms_helper_poll_enable(dev);
3996
3997         amdgpu_ras_resume(adev);
3998
3999         /*
4000          * Most of the connector probing functions try to acquire runtime pm
4001          * refs to ensure that the GPU is powered on when connector polling is
4002          * performed. Since we're calling this from a runtime PM callback,
4003          * trying to acquire rpm refs will cause us to deadlock.
4004          *
4005          * Since we're guaranteed to be holding the rpm lock, it's safe to
4006          * temporarily disable the rpm helpers so this doesn't deadlock us.
4007          */
4008 #ifdef CONFIG_PM
4009         dev->dev->power.disable_depth++;
4010 #endif
4011         if (!amdgpu_device_has_dc_support(adev))
4012                 drm_helper_hpd_irq_event(dev);
4013         else
4014                 drm_kms_helper_hotplug_event(dev);
4015 #ifdef CONFIG_PM
4016         dev->dev->power.disable_depth--;
4017 #endif
4018         adev->in_suspend = false;
4019
4020         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4021                 DRM_WARN("smart shift update failed\n");
4022
4023         return 0;
4024 }
4025
4026 /**
4027  * amdgpu_device_ip_check_soft_reset - check for hung hardware IPs
4028  *
4029  * @adev: amdgpu_device pointer
4030  *
4031  * The list of all the hardware IPs that make up the asic is walked and
4032  * the check_soft_reset callbacks are run.  check_soft_reset determines
4033  * if the asic is still hung or not.
4034  * Returns true if any of the IPs are still in a hung state, false if not.
4035  */
4036 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4037 {
4038         int i;
4039         bool asic_hang = false;
4040
4041         if (amdgpu_sriov_vf(adev))
4042                 return true;
4043
4044         if (amdgpu_asic_need_full_reset(adev))
4045                 return true;
4046
4047         for (i = 0; i < adev->num_ip_blocks; i++) {
4048                 if (!adev->ip_blocks[i].status.valid)
4049                         continue;
4050                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4051                         adev->ip_blocks[i].status.hang =
4052                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4053                 if (adev->ip_blocks[i].status.hang) {
4054                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4055                         asic_hang = true;
4056                 }
4057         }
4058         return asic_hang;
4059 }
4060
4061 /**
4062  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4063  *
4064  * @adev: amdgpu_device pointer
4065  *
4066  * The list of all the hardware IPs that make up the asic is walked and the
4067  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4068  * handles any IP specific hardware or software state changes that are
4069  * necessary for a soft reset to succeed.
4070  * Returns 0 on success, negative error code on failure.
4071  */
4072 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4073 {
4074         int i, r = 0;
4075
4076         for (i = 0; i < adev->num_ip_blocks; i++) {
4077                 if (!adev->ip_blocks[i].status.valid)
4078                         continue;
4079                 if (adev->ip_blocks[i].status.hang &&
4080                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4081                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4082                         if (r)
4083                                 return r;
4084                 }
4085         }
4086
4087         return 0;
4088 }
4089
4090 /**
4091  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4092  *
4093  * @adev: amdgpu_device pointer
4094  *
4095  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4096  * reset is necessary to recover.
4097  * Returns true if a full asic reset is required, false if not.
4098  */
4099 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4100 {
4101         int i;
4102
4103         if (amdgpu_asic_need_full_reset(adev))
4104                 return true;
4105
4106         for (i = 0; i < adev->num_ip_blocks; i++) {
4107                 if (!adev->ip_blocks[i].status.valid)
4108                         continue;
4109                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4110                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4111                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4112                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4113                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4114                         if (adev->ip_blocks[i].status.hang) {
4115                                 dev_info(adev->dev, "Some blocks need a full reset!\n");
4116                                 return true;
4117                         }
4118                 }
4119         }
4120         return false;
4121 }
4122
4123 /**
4124  * amdgpu_device_ip_soft_reset - do a soft reset
4125  *
4126  * @adev: amdgpu_device pointer
4127  *
4128  * The list of all the hardware IPs that make up the asic is walked and the
4129  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4130  * IP specific hardware or software state changes that are necessary to soft
4131  * reset the IP.
4132  * Returns 0 on success, negative error code on failure.
4133  */
4134 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4135 {
4136         int i, r = 0;
4137
4138         for (i = 0; i < adev->num_ip_blocks; i++) {
4139                 if (!adev->ip_blocks[i].status.valid)
4140                         continue;
4141                 if (adev->ip_blocks[i].status.hang &&
4142                     adev->ip_blocks[i].version->funcs->soft_reset) {
4143                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4144                         if (r)
4145                                 return r;
4146                 }
4147         }
4148
4149         return 0;
4150 }
4151
4152 /**
4153  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4154  *
4155  * @adev: amdgpu_device pointer
4156  *
4157  * The list of all the hardware IPs that make up the asic is walked and the
4158  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4159  * handles any IP specific hardware or software state changes that are
4160  * necessary after the IP has been soft reset.
4161  * Returns 0 on success, negative error code on failure.
4162  */
4163 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4164 {
4165         int i, r = 0;
4166
4167         for (i = 0; i < adev->num_ip_blocks; i++) {
4168                 if (!adev->ip_blocks[i].status.valid)
4169                         continue;
4170                 if (adev->ip_blocks[i].status.hang &&
4171                     adev->ip_blocks[i].version->funcs->post_soft_reset)
4172                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4173                 if (r)
4174                         return r;
4175         }
4176
4177         return 0;
4178 }
4179
4180 /**
4181  * amdgpu_device_recover_vram - Recover some VRAM contents
4182  *
4183  * @adev: amdgpu_device pointer
4184  *
4185  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4186  * restore things like GPUVM page tables after a GPU reset where
4187  * the contents of VRAM might be lost.
4188  *
4189  * Returns:
4190  * 0 on success, negative error code on failure.
4191  */
4192 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4193 {
4194         struct dma_fence *fence = NULL, *next = NULL;
4195         struct amdgpu_bo *shadow;
4196         struct amdgpu_bo_vm *vmbo;
4197         long r = 1, tmo;
4198
4199         if (amdgpu_sriov_runtime(adev))
4200                 tmo = msecs_to_jiffies(8000);
4201         else
4202                 tmo = msecs_to_jiffies(100);
4203
4204         dev_info(adev->dev, "recover vram bo from shadow start\n");
4205         mutex_lock(&adev->shadow_list_lock);
4206         list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4207                 shadow = &vmbo->bo;
4208                 /* No need to recover an evicted BO */
4209                 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4210                     shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4211                     shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4212                         continue;
4213
4214                 r = amdgpu_bo_restore_shadow(shadow, &next);
4215                 if (r)
4216                         break;
4217
4218                 if (fence) {
4219                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4220                         dma_fence_put(fence);
4221                         fence = next;
4222                         if (tmo == 0) {
4223                                 r = -ETIMEDOUT;
4224                                 break;
4225                         } else if (tmo < 0) {
4226                                 r = tmo;
4227                                 break;
4228                         }
4229                 } else {
4230                         fence = next;
4231                 }
4232         }
4233         mutex_unlock(&adev->shadow_list_lock);
4234
4235         if (fence)
4236                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4237         dma_fence_put(fence);
4238
4239         if (r < 0 || tmo <= 0) {
4240                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4241                 return -EIO;
4242         }
4243
4244         dev_info(adev->dev, "recover vram bo from shadow done\n");
4245         return 0;
4246 }
4247
4248
4249 /**
4250  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4251  *
4252  * @adev: amdgpu_device pointer
4253  * @from_hypervisor: request from hypervisor
4254  *
4255  * Do a VF FLR and reinitialize the ASIC.
4256  * Returns 0 on success, negative error code on failure.
4257  */
4258 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4259                                      bool from_hypervisor)
4260 {
4261         int r;
4262
4263         if (from_hypervisor)
4264                 r = amdgpu_virt_request_full_gpu(adev, true);
4265         else
4266                 r = amdgpu_virt_reset_gpu(adev);
4267         if (r)
4268                 return r;
4269
4270         amdgpu_amdkfd_pre_reset(adev);
4271
4272         /* Resume IP prior to SMC */
4273         r = amdgpu_device_ip_reinit_early_sriov(adev);
4274         if (r)
4275                 goto error;
4276
4277         amdgpu_virt_init_data_exchange(adev);
4278         /* we need to recover the GART prior to running SMC/CP/SDMA resume */
4279         amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
4280
4281         r = amdgpu_device_fw_loading(adev);
4282         if (r)
4283                 return r;
4284
4285         /* now we are okay to resume SMC/CP/SDMA */
4286         r = amdgpu_device_ip_reinit_late_sriov(adev);
4287         if (r)
4288                 goto error;
4289
4290         amdgpu_irq_gpu_reset_resume_helper(adev);
4291         r = amdgpu_ib_ring_tests(adev);
4292         amdgpu_amdkfd_post_reset(adev);
4293
4294 error:
4295         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4296                 amdgpu_inc_vram_lost(adev);
4297                 r = amdgpu_device_recover_vram(adev);
4298         }
4299         amdgpu_virt_release_full_gpu(adev, true);
4300
4301         return r;
4302 }
4303
4304 /**
4305  * amdgpu_device_has_job_running - check if there is any job in mirror list
4306  *
4307  * @adev: amdgpu_device pointer
4308  *
4309  * Check whether any job is still pending on any ring.
4310  */
4311 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4312 {
4313         int i;
4314         struct drm_sched_job *job;
4315
4316         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4317                 struct amdgpu_ring *ring = adev->rings[i];
4318
4319                 if (!ring || !ring->sched.thread)
4320                         continue;
4321
4322                 spin_lock(&ring->sched.job_list_lock);
4323                 job = list_first_entry_or_null(&ring->sched.pending_list,
4324                                                struct drm_sched_job, list);
4325                 spin_unlock(&ring->sched.job_list_lock);
4326                 if (job)
4327                         return true;
4328         }
4329         return false;
4330 }
4331
4332 /**
4333  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4334  *
4335  * @adev: amdgpu_device pointer
4336  *
4337  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4338  * a hung GPU.
4339  */
4340 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4341 {
4342         if (!amdgpu_device_ip_check_soft_reset(adev)) {
4343                 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4344                 return false;
4345         }
4346
4347         if (amdgpu_gpu_recovery == 0)
4348                 goto disabled;
4349
4350         if (amdgpu_sriov_vf(adev))
4351                 return true;
4352
4353         if (amdgpu_gpu_recovery == -1) {
4354                 switch (adev->asic_type) {
4355                 case CHIP_BONAIRE:
4356                 case CHIP_HAWAII:
4357                 case CHIP_TOPAZ:
4358                 case CHIP_TONGA:
4359                 case CHIP_FIJI:
4360                 case CHIP_POLARIS10:
4361                 case CHIP_POLARIS11:
4362                 case CHIP_POLARIS12:
4363                 case CHIP_VEGAM:
4364                 case CHIP_VEGA20:
4365                 case CHIP_VEGA10:
4366                 case CHIP_VEGA12:
4367                 case CHIP_RAVEN:
4368                 case CHIP_ARCTURUS:
4369                 case CHIP_RENOIR:
4370                 case CHIP_NAVI10:
4371                 case CHIP_NAVI14:
4372                 case CHIP_NAVI12:
4373                 case CHIP_SIENNA_CICHLID:
4374                 case CHIP_NAVY_FLOUNDER:
4375                 case CHIP_DIMGREY_CAVEFISH:
4376                 case CHIP_BEIGE_GOBY:
4377                 case CHIP_VANGOGH:
4378                 case CHIP_ALDEBARAN:
4379                         break;
4380                 default:
4381                         goto disabled;
4382                 }
4383         }
4384
4385         return true;
4386
4387 disabled:
4388         dev_info(adev->dev, "GPU recovery disabled.\n");
4389         return false;
4390 }
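
/*
 * Illustrative sketch, not part of the driver: a job timeout handler is
 * expected to gate recovery on the check above; the real entry point
 * lives in amdgpu_job.c and ends up in amdgpu_device_gpu_recover()
 * (defined further below).  This stand-in is hypothetical.
 */
static void __maybe_unused example_handle_job_timeout(struct amdgpu_device *adev,
                                                      struct amdgpu_job *job)
{
        if (!amdgpu_device_should_recover_gpu(adev))
                return; /* recovery disabled, or no real hang detected */

        amdgpu_device_gpu_recover(adev, job);
}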
4391
4392 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4393 {
4394         u32 i;
4395         int ret = 0;
4396
4397         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4398
4399         dev_info(adev->dev, "GPU mode1 reset\n");
4400
4401         /* disable BM */
4402         pci_clear_master(adev->pdev);
4403
4404         amdgpu_device_cache_pci_state(adev->pdev);
4405
4406         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4407                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4408                 ret = amdgpu_dpm_mode1_reset(adev);
4409         } else {
4410                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4411                 ret = psp_gpu_reset(adev);
4412         }
4413
4414         if (ret)
4415                 dev_err(adev->dev, "GPU mode1 reset failed\n");
4416
4417         amdgpu_device_load_pci_state(adev->pdev);
4418
4419         /* wait for asic to come out of reset */
4420         for (i = 0; i < adev->usec_timeout; i++) {
4421                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4422
4423                 if (memsize != 0xffffffff)
4424                         break;
4425                 udelay(1);
4426         }
4427
4428         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4429         return ret;
4430 }
4431
4432 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4433                                  struct amdgpu_reset_context *reset_context)
4434 {
4435         int i, j, r = 0;
4436         struct amdgpu_job *job = NULL;
4437         bool need_full_reset =
4438                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4439
4440         if (reset_context->reset_req_dev == adev)
4441                 job = reset_context->job;
4442
4443         if (amdgpu_sriov_vf(adev)) {
4444                 /* stop the data exchange thread */
4445                 amdgpu_virt_fini_data_exchange(adev);
4446         }
4447
4448         /* block all schedulers and reset given job's ring */
4449         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4450                 struct amdgpu_ring *ring = adev->rings[i];
4451
4452                 if (!ring || !ring->sched.thread)
4453                         continue;
4454
4455                 /* Clear job fences from the fence driver to avoid
4456                  * force_completion; leave NULL and VM flush fences intact. */
4457                 for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
4458                         struct dma_fence *old, **ptr;
4459
4460                         ptr = &ring->fence_drv.fences[j];
4461                         old = rcu_dereference_protected(*ptr, 1);
4462                         if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
4463                                 RCU_INIT_POINTER(*ptr, NULL);
4464                         }
4465                 }
4466                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4467                 amdgpu_fence_driver_force_completion(ring);
4468         }
4469
4470         if (job && job->vm)
4471                 drm_sched_increase_karma(&job->base);
4472
4473         r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4474         /* If reset handler not implemented, continue; otherwise return */
4475         if (r == -ENOSYS)
4476                 r = 0;
4477         else
4478                 return r;
4479
4480         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4481         if (!amdgpu_sriov_vf(adev)) {
4482
4483                 if (!need_full_reset)
4484                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4485
4486                 if (!need_full_reset) {
4487                         amdgpu_device_ip_pre_soft_reset(adev);
4488                         r = amdgpu_device_ip_soft_reset(adev);
4489                         amdgpu_device_ip_post_soft_reset(adev);
4490                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4491                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4492                                 need_full_reset = true;
4493                         }
4494                 }
4495
4496                 if (need_full_reset) {
4497                         r = amdgpu_device_ip_suspend(adev);
4498                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4499                 } else {
4500                         clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4501                 }
4503         }
4504
4505         return r;
4506 }
4507
4508 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4509                          struct amdgpu_reset_context *reset_context)
4510 {
4511         struct amdgpu_device *tmp_adev = NULL;
4512         bool need_full_reset, skip_hw_reset, vram_lost = false;
4513         int r = 0;
4514
4515         /* Try reset handler method first */
4516         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4517                                     reset_list);
4518         r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4519         /* If reset handler not implemented, continue; otherwise return */
4520         if (r == -ENOSYS)
4521                 r = 0;
4522         else
4523                 return r;
4524
4525         /* Reset handler not implemented, use the default method */
4526         need_full_reset =
4527                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4528         skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4529
4530         /*
4531          * ASIC reset has to be done on all XGMI hive nodes ASAP
4532          * to allow proper link negotiation in FW (within 1 sec)
4533          */
4534         if (!skip_hw_reset && need_full_reset) {
4535                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4536                         /* For XGMI run all resets in parallel to speed up the process */
4537                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4538                                 tmp_adev->gmc.xgmi.pending_reset = false;
4539                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4540                                         r = -EALREADY;
4541                         } else
4542                                 r = amdgpu_asic_reset(tmp_adev);
4543
4544                         if (r) {
4545                                 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4546                                          r, adev_to_drm(tmp_adev)->unique);
4547                                 break;
4548                         }
4549                 }
4550
4551                 /* For XGMI wait for all resets to complete before proceeding */
4552                 if (!r) {
4553                         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4554                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4555                                         flush_work(&tmp_adev->xgmi_reset_work);
4556                                         r = tmp_adev->asic_reset_res;
4557                                         if (r)
4558                                                 break;
4559                                 }
4560                         }
4561                 }
4562         }
4563
4564         if (!r && amdgpu_ras_intr_triggered()) {
4565                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4566                         if (tmp_adev->mmhub.ras_funcs &&
4567                             tmp_adev->mmhub.ras_funcs->reset_ras_error_count)
4568                                 tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev);
4569                 }
4570
4571                 amdgpu_ras_intr_cleared();
4572         }
4573
4574         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4575                 if (need_full_reset) {
4576                         /* post card */
4577                         r = amdgpu_device_asic_init(tmp_adev);
4578                         if (r) {
4579                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
4580                         } else {
4581                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4582                                 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4583                                 if (r)
4584                                         goto out;
4585
4586                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4587                                 if (r)
4588                                         goto out;
4589
4590                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4591                                 if (vram_lost) {
4592                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
4593                                         amdgpu_inc_vram_lost(tmp_adev);
4594                                 }
4595
4596                                 r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
4597                                 if (r)
4598                                         goto out;
4599
4600                                 r = amdgpu_device_fw_loading(tmp_adev);
4601                                 if (r)
4602                                         return r;
4603
4604                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4605                                 if (r)
4606                                         goto out;
4607
4608                                 if (vram_lost)
4609                                         amdgpu_device_fill_reset_magic(tmp_adev);
4610
4611                                 /*
4612                                  * Add this ASIC back as tracked, since the reset
4613                                  * already completed successfully.
4614                                  */
4615                                 amdgpu_register_gpu_instance(tmp_adev);
4616
4617                                 if (!reset_context->hive &&
4618                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4619                                         amdgpu_xgmi_add_device(tmp_adev);
4620
4621                                 r = amdgpu_device_ip_late_init(tmp_adev);
4622                                 if (r)
4623                                         goto out;
4624
4625                                 amdgpu_fbdev_set_suspend(tmp_adev, 0);
4626
4627                                 /*
4628                                  * The GPU enters a bad state once the number of
4629                                  * faulty pages reported by ECC reaches the
4630                                  * threshold, and RAS recovery is scheduled next.
4631                                  * So check here whether the bad page threshold
4632                                  * has indeed been exceeded; if so, abort recovery
4633                                  * and remind the user to either retire this GPU
4634                                  * or set a bigger bad_page_threshold the next
4635                                  * time the driver is probed.
4636                                  */
4637                                 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4638                                         /* must succeed. */
4639                                         amdgpu_ras_resume(tmp_adev);
4640                                 } else {
4641                                         r = -EINVAL;
4642                                         goto out;
4643                                 }
4644
4645                                 /* Update PSP FW topology after reset */
4646                                 if (reset_context->hive &&
4647                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4648                                         r = amdgpu_xgmi_update_topology(
4649                                                 reset_context->hive, tmp_adev);
4650                         }
4651                 }
4652
4653 out:
4654                 if (!r) {
4655                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4656                         r = amdgpu_ib_ring_tests(tmp_adev);
4657                         if (r) {
4658                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4659                                 need_full_reset = true;
4660                                 r = -EAGAIN;
4661                                 goto end;
4662                         }
4663                 }
4664
4665                 if (!r)
4666                         r = amdgpu_device_recover_vram(tmp_adev);
4667                 else
4668                         tmp_adev->asic_reset_res = r;
4669         }
4670
4671 end:
4672         if (need_full_reset)
4673                 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4674         else
4675                 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4676         return r;
4677 }
4678
4679 static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
4680                                 struct amdgpu_hive_info *hive)
4681 {
4682         if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
4683                 return false;
4684
4685         if (hive) {
4686                 down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
4687         } else {
4688                 down_write(&adev->reset_sem);
4689         }
4690
4691         switch (amdgpu_asic_reset_method(adev)) {
4692         case AMD_RESET_METHOD_MODE1:
4693                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4694                 break;
4695         case AMD_RESET_METHOD_MODE2:
4696                 adev->mp1_state = PP_MP1_STATE_RESET;
4697                 break;
4698         default:
4699                 adev->mp1_state = PP_MP1_STATE_NONE;
4700                 break;
4701         }
4702
4703         return true;
4704 }
4705
4706 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4707 {
4708         amdgpu_vf_error_trans_all(adev);
4709         adev->mp1_state = PP_MP1_STATE_NONE;
4710         atomic_set(&adev->in_gpu_reset, 0);
4711         up_write(&adev->reset_sem);
4712 }
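
/*
 * Illustrative sketch, not part of the driver: the lock/unlock pair
 * above always brackets reset work.  A hypothetical critical section
 * looks like this; passing a non-NULL hive nests reset_sem under the
 * hive lock.
 */
static void __maybe_unused example_locked_reset_section(struct amdgpu_device *adev)
{
        if (!amdgpu_device_lock_adev(adev, NULL))
                return; /* another reset already owns the device */

        /* ... reset steps run here while holding adev->reset_sem ... */

        amdgpu_device_unlock_adev(adev);
}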
4713
4714 /*
4715  * Safely lock a list of amdgpu devices in a hive; for a device that is
4716  * not part of a multi-node hive, this behaves like amdgpu_device_lock_adev.
4717  *
4718  * Unlocking never requires a rollback.
4719  */
4720 static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
4721 {
4722         struct amdgpu_device *tmp_adev = NULL;
4723
4724         if (adev->gmc.xgmi.num_physical_nodes > 1) {
4725                 if (!hive) {
4726                         dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
4727                         return -ENODEV;
4728                 }
4729                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4730                         if (!amdgpu_device_lock_adev(tmp_adev, hive))
4731                                 goto roll_back;
4732                 }
4733         } else if (!amdgpu_device_lock_adev(adev, hive))
4734                 return -EAGAIN;
4735
4736         return 0;
4737 roll_back:
4738         if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
4739                 /*
4740                  * If the locking iteration broke off in the middle of a hive,
4741                  * there may be a race, or a hive device may have locked up
4742                  * independently. We may or may not be in trouble, so roll
4743                  * back the locks taken so far and emit a warning.
4745                  */
4746                 dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
4747                 list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4748                         amdgpu_device_unlock_adev(tmp_adev);
4749                 }
4750         }
4751         return -EAGAIN;
4752 }
4753
4754 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4755 {
4756         struct pci_dev *p = NULL;
4757
4758         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4759                         adev->pdev->bus->number, 1);
4760         if (p) {
4761                 pm_runtime_enable(&(p->dev));
4762                 pm_runtime_resume(&(p->dev));
4763         }
4764 }
4765
4766 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4767 {
4768         enum amd_reset_method reset_method;
4769         struct pci_dev *p = NULL;
4770         u64 expires;
4771
4772         /*
4773          * For now, only BACO and mode1 reset are confirmed to
4774          * suffer from the audio issue if not properly suspended.
4775          */
4776         reset_method = amdgpu_asic_reset_method(adev);
4777         if ((reset_method != AMD_RESET_METHOD_BACO) &&
4778              (reset_method != AMD_RESET_METHOD_MODE1))
4779                 return -EINVAL;
4780
4781         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4782                         adev->pdev->bus->number, 1);
4783         if (!p)
4784                 return -ENODEV;
4785
4786         expires = pm_runtime_autosuspend_expiration(&(p->dev));
4787         if (!expires)
4788                 /*
4789                  * If we cannot get the audio device autosuspend delay,
4790                  * a fixed 4s interval is used. Since 3s is the audio
4791                  * controller's default autosuspend delay, the 4s used
4792                  * here is guaranteed to cover it.
4793                  */
4794                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4795
4796         while (!pm_runtime_status_suspended(&(p->dev))) {
4797                 if (!pm_runtime_suspend(&(p->dev)))
4798                         break;
4799
4800                 if (expires < ktime_get_mono_fast_ns()) {
4801                         dev_warn(adev->dev, "failed to suspend display audio\n");
4802                         /* TODO: abort the succeeding gpu reset? */
4803                         return -ETIMEDOUT;
4804                 }
4805         }
4806
4807         pm_runtime_disable(&(p->dev));
4808
4809         return 0;
4810 }
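
/*
 * Illustrative sketch, not part of the driver: the audio helpers above
 * are meant to bracket a GPU reset, mirroring how
 * amdgpu_device_gpu_recover() uses them further below.
 */
static void __maybe_unused example_reset_with_audio_quiesced(struct amdgpu_device *adev)
{
        bool audio_suspended = !amdgpu_device_suspend_display_audio(adev);

        /* ... perform the actual reset here ... */

        if (audio_suspended)
                amdgpu_device_resume_display_audio(adev);
}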
4811
4812 static void amdgpu_device_recheck_guilty_jobs(
4813         struct amdgpu_device *adev, struct list_head *device_list_handle,
4814         struct amdgpu_reset_context *reset_context)
4815 {
4816         int i, r = 0;
4817
4818         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4819                 struct amdgpu_ring *ring = adev->rings[i];
4820                 int ret = 0;
4821                 struct drm_sched_job *s_job;
4822
4823                 if (!ring || !ring->sched.thread)
4824                         continue;
4825
4826                 s_job = list_first_entry_or_null(&ring->sched.pending_list,
4827                                 struct drm_sched_job, list);
4828                 if (s_job == NULL)
4829                         continue;
4830
4831                 /* clear the job's guilty flag; the following step decides the real one */
4832                 drm_sched_reset_karma(s_job);
4833                 drm_sched_resubmit_jobs_ext(&ring->sched, 1);
4834
4835                 ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
4836                 if (ret == 0) { /* timeout */
4837                         DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
4838                                                 ring->sched.name, s_job->id);
4839
4840                         /* set guilty */
4841                         drm_sched_increase_karma(s_job);
4842 retry:
4843                         /* do hw reset */
4844                         if (amdgpu_sriov_vf(adev)) {
4845                                 amdgpu_virt_fini_data_exchange(adev);
4846                                 r = amdgpu_device_reset_sriov(adev, false);
4847                                 if (r)
4848                                         adev->asic_reset_res = r;
4849                         } else {
4850                                 clear_bit(AMDGPU_SKIP_HW_RESET,
4851                                           &reset_context->flags);
4852                                 r = amdgpu_do_asic_reset(device_list_handle,
4853                                                          reset_context);
4854                                 if (r == -EAGAIN)
4855                                         goto retry;
4856                         }
4857
4858                         /*
4859                          * add reset counter so that the following
4860                          * resubmitted job could flush vmid
4861                          */
4862                         atomic_inc(&adev->gpu_reset_counter);
4863                         continue;
4864                 }
4865
4866                 /* got the hw fence, signal finished fence */
4867                 atomic_dec(ring->sched.score);
4868                 dma_fence_get(&s_job->s_fence->finished);
4869                 dma_fence_signal(&s_job->s_fence->finished);
4870                 dma_fence_put(&s_job->s_fence->finished);
4871
4872                 /* remove node from list and free the job */
4873                 spin_lock(&ring->sched.job_list_lock);
4874                 list_del_init(&s_job->list);
4875                 spin_unlock(&ring->sched.job_list_lock);
4876                 ring->sched.ops->free_job(s_job);
4877         }
4878 }
4879
4880 /**
4881  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
4882  *
4883  * @adev: amdgpu_device pointer
4884  * @job: the job that triggered the hang
4885  *
4886  * Attempt to reset the GPU if it has hung (all asics).
4887  * Attempt a soft reset or a full reset and reinitialize the ASIC.
4888  * Returns 0 for success or an error on failure.
4889  */
4891 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4892                               struct amdgpu_job *job)
4893 {
4894         struct list_head device_list, *device_list_handle =  NULL;
4895         bool job_signaled = false;
4896         struct amdgpu_hive_info *hive = NULL;
4897         struct amdgpu_device *tmp_adev = NULL;
4898         int i, r = 0;
4899         bool need_emergency_restart = false;
4900         bool audio_suspended = false;
4901         int tmp_vram_lost_counter;
4902         struct amdgpu_reset_context reset_context;
4903
4904         memset(&reset_context, 0, sizeof(reset_context));
4905
4906         /*
4907          * Special case: RAS triggered and full reset isn't supported
4908          */
4909         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
4910
4911         /*
4912          * Flush RAM to disk so that after reboot
4913          * the user can read log and see why the system rebooted.
4914          */
4915         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
4916                 DRM_WARN("Emergency reboot.");
4917
4918                 ksys_sync_helper();
4919                 emergency_restart();
4920         }
4921
4922         dev_info(adev->dev, "GPU %s begin!\n",
4923                 need_emergency_restart ? "jobs stop":"reset");
4924
4925         /*
4926          * Here we trylock to avoid a chain of resets executing, whether
4927          * triggered by jobs on different adevs in an XGMI hive or by jobs on
4928          * different schedulers of the same device, while this TO handler runs.
4929          * We always reset all schedulers of a device and all devices of an
4930          * XGMI hive, so that should take care of them too.
4931          */
4932         hive = amdgpu_get_xgmi_hive(adev);
4933         if (hive) {
4934                 if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
4935                         DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
4936                                 job ? job->base.id : -1, hive->hive_id);
4937                         amdgpu_put_xgmi_hive(hive);
4938                         if (job && job->vm)
4939                                 drm_sched_increase_karma(&job->base);
4940                         return 0;
4941                 }
4942                 mutex_lock(&hive->hive_lock);
4943         }
4944
4945         reset_context.method = AMD_RESET_METHOD_NONE;
4946         reset_context.reset_req_dev = adev;
4947         reset_context.job = job;
4948         reset_context.hive = hive;
4949         clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
4950
4951         /*
4952          * Lock the device before we try to operate on the linked list;
4953          * if we didn't get the device lock, don't touch the linked list
4954          * since others may be iterating over it.
4955          */
4956         r = amdgpu_device_lock_hive_adev(adev, hive);
4957         if (r) {
4958                 dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
4959                                         job ? job->base.id : -1);
4960
4961                 /* even though we skipped this reset, we still need to mark the job as guilty */
4962                 if (job && job->vm)
4963                         drm_sched_increase_karma(&job->base);
4964                 goto skip_recovery;
4965         }
4966
4967         /*
4968          * Build list of devices to reset.
4969          * In case we are in XGMI hive mode, resort the device list
4970          * to put adev in the 1st position.
4971          */
4972         INIT_LIST_HEAD(&device_list);
4973         if (adev->gmc.xgmi.num_physical_nodes > 1) {
4974                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
4975                         list_add_tail(&tmp_adev->reset_list, &device_list);
4976                 if (!list_is_first(&adev->reset_list, &device_list))
4977                         list_rotate_to_front(&adev->reset_list, &device_list);
4978                 device_list_handle = &device_list;
4979         } else {
4980                 list_add_tail(&adev->reset_list, &device_list);
4981                 device_list_handle = &device_list;
4982         }
4983
4984         /* block all schedulers and reset given job's ring */
4985         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4986                 /*
4987                  * Try to put the audio codec into suspend state
4988                  * before the gpu reset starts.
4989                  *
4990                  * The power domain of the graphics device is shared
4991                  * with the AZ power domain. Without this, we may
4992                  * change the audio hardware state behind the audio
4993                  * driver's back, which would trigger audio codec
4994                  * errors.
4995                  */
4996                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
4997                         audio_suspended = true;
4998
4999                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5000
5001                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5002
5003                 if (!amdgpu_sriov_vf(tmp_adev))
5004                         amdgpu_amdkfd_pre_reset(tmp_adev);
5005
5006                 /*
5007                  * Mark the ASICs to be reset as untracked first,
5008                  * and add them back after the reset completes.
5009                  */
5010                 amdgpu_unregister_gpu_instance(tmp_adev);
5011
5012                 amdgpu_fbdev_set_suspend(tmp_adev, 1);
5013
5014                 /* disable ras on ALL IPs */
5015                 if (!need_emergency_restart &&
5016                       amdgpu_device_ip_need_full_reset(tmp_adev))
5017                         amdgpu_ras_suspend(tmp_adev);
5018
5019                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5020                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5021
5022                         if (!ring || !ring->sched.thread)
5023                                 continue;
5024
5025                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5026
5027                         if (need_emergency_restart)
5028                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5029                 }
5030                 atomic_inc(&tmp_adev->gpu_reset_counter);
5031         }
5032
5033         if (need_emergency_restart)
5034                 goto skip_sched_resume;
5035
5036         /*
5037          * Must check guilty signal here since after this point all old
5038          * HW fences are force signaled.
5039          *
5040          * job->base holds a reference to parent fence
5041          */
5042         if (job && job->base.s_fence->parent &&
5043             dma_fence_is_signaled(job->base.s_fence->parent)) {
5044                 job_signaled = true;
5045                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5046                 goto skip_hw_reset;
5047         }
5048
5049 retry:  /* Pre asic reset for the rest of the adevs in the XGMI hive. */
5050         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5051                 r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
5052                 /* TODO: should we stop? */
5053                 if (r) {
5054                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5055                                   r, adev_to_drm(tmp_adev)->unique);
5056                         tmp_adev->asic_reset_res = r;
5057                 }
5058         }
5059
5060         tmp_vram_lost_counter = atomic_read(&adev->vram_lost_counter);
5061         /* Actual ASIC resets if needed. */
5062         /* TODO Implement XGMI hive reset logic for SRIOV */
5063         if (amdgpu_sriov_vf(adev)) {
5064                 r = amdgpu_device_reset_sriov(adev, !job);
5065                 if (r)
5066                         adev->asic_reset_res = r;
5067         } else {
5068                 r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
5069                 if (r == -EAGAIN)
5070                         goto retry;
5071         }
5072
5073 skip_hw_reset:
5074
5075         /* Post ASIC reset for all devs. */
5076         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5077
5078                 /*
5079                  * Sometimes a later bad compute job can block a good gfx job, as the gfx
5080                  * and compute rings share internal GC HW. We add an additional guilty-job
5081                  * recheck step to find the real guilty job: it synchronously resubmits
5082                  * and waits for the first job to be signaled. If that wait times out, we
5083                  * identify the resubmitted job as the real guilty one.
5084                  */
5085                 if (amdgpu_gpu_recovery == 2 &&
5086                         !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5087                         amdgpu_device_recheck_guilty_jobs(
5088                                 tmp_adev, device_list_handle, &reset_context);
5089
5090                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5091                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5092
5093                         if (!ring || !ring->sched.thread)
5094                                 continue;
5095
5096                         /* No point in resubmitting jobs if we didn't HW reset */
5097                         if (!tmp_adev->asic_reset_res && !job_signaled)
5098                                 drm_sched_resubmit_jobs(&ring->sched);
5099
5100                         drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5101                 }
5102
5103                 if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
5104                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5105                 }
5106
5107                 tmp_adev->asic_reset_res = 0;
5108
5109                 if (r) {
5110                         /* bad news, how to tell it to userspace ? */
5111                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5112                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5113                 } else {
5114                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5115                         if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5116                                 DRM_WARN("smart shift update failed\n");
5117                 }
5118         }
5119
5120 skip_sched_resume:
5121         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5122                 /* unlock kfd: SRIOV would do it separately */
5123                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5124                         amdgpu_amdkfd_post_reset(tmp_adev);
5125
5126                 /* kfd_post_reset will do nothing if the kfd device is not initialized;
5127                  * bring up kfd here if it wasn't initialized before
5128                  */
5129                 if (!adev->kfd.init_complete)
5130                         amdgpu_amdkfd_device_init(adev);
5131
5132                 if (audio_suspended)
5133                         amdgpu_device_resume_display_audio(tmp_adev);
5134                 amdgpu_device_unlock_adev(tmp_adev);
5135         }
5136
5137 skip_recovery:
5138         if (hive) {
5139                 atomic_set(&hive->in_reset, 0);
5140                 mutex_unlock(&hive->hive_lock);
5141                 amdgpu_put_xgmi_hive(hive);
5142         }
5143
5144         if (r && r != -EAGAIN)
5145                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5146         return r;
5147 }
5148
5149 /**
5150  * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5151  *
5152  * @adev: amdgpu_device pointer
5153  *
5154  * Fetches and stores in the driver the PCIE capabilities (gen speed
5155  * and lanes) of the slot the device is in. Handles APUs and
5156  * virtualized environments where PCIE config space may not be available.
5157  */
5158 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5159 {
5160         struct pci_dev *pdev;
5161         enum pci_bus_speed speed_cap, platform_speed_cap;
5162         enum pcie_link_width platform_link_width;
5163
5164         if (amdgpu_pcie_gen_cap)
5165                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5166
5167         if (amdgpu_pcie_lane_cap)
5168                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5169
5170         /* covers APUs as well */
5171         if (pci_is_root_bus(adev->pdev->bus)) {
5172                 if (adev->pm.pcie_gen_mask == 0)
5173                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5174                 if (adev->pm.pcie_mlw_mask == 0)
5175                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5176                 return;
5177         }
5178
5179         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5180                 return;
5181
5182         pcie_bandwidth_available(adev->pdev, NULL,
5183                                  &platform_speed_cap, &platform_link_width);
5184
5185         if (adev->pm.pcie_gen_mask == 0) {
5186                 /* asic caps */
5187                 pdev = adev->pdev;
5188                 speed_cap = pcie_get_speed_cap(pdev);
5189                 if (speed_cap == PCI_SPEED_UNKNOWN) {
5190                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5191                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5192                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5193                 } else {
5194                         if (speed_cap == PCIE_SPEED_32_0GT)
5195                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5196                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5197                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5198                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5199                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5200                         else if (speed_cap == PCIE_SPEED_16_0GT)
5201                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5202                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5203                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5204                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5205                         else if (speed_cap == PCIE_SPEED_8_0GT)
5206                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5207                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5208                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5209                         else if (speed_cap == PCIE_SPEED_5_0GT)
5210                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5211                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5212                         else
5213                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5214                 }
5215                 /* platform caps */
5216                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5217                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5218                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5219                 } else {
5220                         if (platform_speed_cap == PCIE_SPEED_32_0GT)
5221                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5222                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5223                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5224                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5225                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5226                         else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5227                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5228                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5229                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5230                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5231                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5232                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5233                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5234                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5235                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5236                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5237                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5238                         else
5239                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5240
5241                 }
5242         }
5243         if (adev->pm.pcie_mlw_mask == 0) {
5244                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5245                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5246                 } else {
5247                         switch (platform_link_width) {
5248                         case PCIE_LNK_X32:
5249                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5250                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5251                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5252                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5253                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5254                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5255                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5256                                 break;
5257                         case PCIE_LNK_X16:
5258                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5259                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5260                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5261                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5262                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5263                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5264                                 break;
5265                         case PCIE_LNK_X12:
5266                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5267                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5268                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5269                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5270                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5271                                 break;
5272                         case PCIE_LNK_X8:
5273                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5274                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5275                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5276                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5277                                 break;
5278                         case PCIE_LNK_X4:
5279                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5280                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5281                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5282                                 break;
5283                         case PCIE_LNK_X2:
5284                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5285                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5286                                 break;
5287                         case PCIE_LNK_X1:
5288                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5289                                 break;
5290                         default:
5291                                 break;
5292                         }
5293                 }
5294         }
5295 }
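
/*
 * Illustrative sketch, not part of the driver: once
 * amdgpu_device_get_pcie_info() has populated the cached masks,
 * consumers test the CAIL_* bits instead of re-reading config space.
 * The gen4 query below is a hypothetical example.
 */
static bool __maybe_unused example_platform_supports_pcie_gen4(struct amdgpu_device *adev)
{
        return !!(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
}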
5296
5297 int amdgpu_device_baco_enter(struct drm_device *dev)
5298 {
5299         struct amdgpu_device *adev = drm_to_adev(dev);
5300         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5301
5302         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5303                 return -ENOTSUPP;
5304
5305         if (ras && adev->ras_enabled &&
5306             adev->nbio.funcs->enable_doorbell_interrupt)
5307                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5308
5309         return amdgpu_dpm_baco_enter(adev);
5310 }
5311
5312 int amdgpu_device_baco_exit(struct drm_device *dev)
5313 {
5314         struct amdgpu_device *adev = drm_to_adev(dev);
5315         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5316         int ret = 0;
5317
5318         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5319                 return -ENOTSUPP;
5320
5321         ret = amdgpu_dpm_baco_exit(adev);
5322         if (ret)
5323                 return ret;
5324
5325         if (ras && adev->ras_enabled &&
5326             adev->nbio.funcs->enable_doorbell_interrupt)
5327                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5328
5329         if (amdgpu_passthrough(adev) &&
5330             adev->nbio.funcs->clear_doorbell_interrupt)
5331                 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5332
5333         return 0;
5334 }
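
/*
 * Illustrative sketch, not part of the driver: BACO ("bus active, chip
 * off") entry and exit are always paired, typically around runtime
 * suspend/resume.  A hypothetical round trip looks like this.
 */
static int __maybe_unused example_baco_round_trip(struct drm_device *dev)
{
        int r;

        r = amdgpu_device_baco_enter(dev);
        if (r)
                return r; /* includes -ENOTSUPP when BACO is unavailable */

        /* ... the device now sits in BACO ... */

        return amdgpu_device_baco_exit(dev);
}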
5335
5336 static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
5337 {
5338         int i;
5339
5340         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5341                 struct amdgpu_ring *ring = adev->rings[i];
5342
5343                 if (!ring || !ring->sched.thread)
5344                         continue;
5345
5346                 cancel_delayed_work_sync(&ring->sched.work_tdr);
5347         }
5348 }
5349
5350 /**
5351  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5352  * @pdev: PCI device struct
5353  * @state: PCI channel state
5354  *
5355  * Description: Called when a PCI error is detected.
5356  *
5357  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5358  */
5359 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5360 {
5361         struct drm_device *dev = pci_get_drvdata(pdev);
5362         struct amdgpu_device *adev = drm_to_adev(dev);
5363         int i;
5364
5365         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5366
5367         if (adev->gmc.xgmi.num_physical_nodes > 1) {
5368                 DRM_WARN("No support for XGMI hive yet...");
5369                 return PCI_ERS_RESULT_DISCONNECT;
5370         }
5371
5372         switch (state) {
5373         case pci_channel_io_normal:
5374                 return PCI_ERS_RESULT_CAN_RECOVER;
5375         /* Fatal error, prepare for slot reset */
5376         case pci_channel_io_frozen:
5377                 /*
5378                  * Cancel and wait for all TDRs in progress if failing to
5379                  * set adev->in_gpu_reset in amdgpu_device_lock_adev
5380                  *
5381                  * Locking adev->reset_sem will prevent any external access
5382                  * to GPU during PCI error recovery
5383                  */
5384                 while (!amdgpu_device_lock_adev(adev, NULL))
5385                         amdgpu_cancel_all_tdr(adev);
5386
5387                 /*
5388                  * Block any work scheduling as we do for regular GPU reset
5389                  * for the duration of the recovery
5390                  */
5391                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5392                         struct amdgpu_ring *ring = adev->rings[i];
5393
5394                         if (!ring || !ring->sched.thread)
5395                                 continue;
5396
5397                         drm_sched_stop(&ring->sched, NULL);
5398                 }
5399                 atomic_inc(&adev->gpu_reset_counter);
5400                 return PCI_ERS_RESULT_NEED_RESET;
5401         case pci_channel_io_perm_failure:
5402                 /* Permanent error, prepare for device removal */
5403                 return PCI_ERS_RESULT_DISCONNECT;
5404         }
5405
5406         return PCI_ERS_RESULT_NEED_RESET;
5407 }
5408
5409 /**
5410  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5411  * @pdev: pointer to PCI device
5412  */
5413 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5414 {
5415
5416         DRM_INFO("PCI error: mmio enabled callback!!\n");
5417
5418         /* TODO - dump whatever for debugging purposes */
5419
5420         /* This is called only if amdgpu_pci_error_detected returns
5421          * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5422          * works, so there is no need to reset the slot.
5423          */
5424
5425         return PCI_ERS_RESULT_RECOVERED;
5426 }
5427
5428 /**
5429  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5430  * @pdev: PCI device struct
5431  *
5432  * Description: This routine is called by the pci error recovery
5433  * code after the PCI slot has been reset, just before we
5434  * should resume normal operations.
5435  */
5436 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5437 {
5438         struct drm_device *dev = pci_get_drvdata(pdev);
5439         struct amdgpu_device *adev = drm_to_adev(dev);
5440         int r, i;
5441         struct amdgpu_reset_context reset_context;
5442         u32 memsize;
5443         struct list_head device_list;
5444
5445         DRM_INFO("PCI error: slot reset callback!!\n");
5446
5447         memset(&reset_context, 0, sizeof(reset_context));
5448
5449         INIT_LIST_HEAD(&device_list);
5450         list_add_tail(&adev->reset_list, &device_list);
5451
5452         /* wait for asic to come out of reset */
5453         msleep(500);
5454
5455         /* Restore PCI config space */
5456         amdgpu_device_load_pci_state(pdev);
5457
5458         /* confirm the ASIC came out of reset */
5459         for (i = 0; i < adev->usec_timeout; i++) {
5460                 memsize = amdgpu_asic_get_config_memsize(adev);
5461
5462                 if (memsize != 0xffffffff)
5463                         break;
5464                 udelay(1);
5465         }
5466         if (memsize == 0xffffffff) {
5467                 r = -ETIME;
5468                 goto out;
5469         }
5470
5471         reset_context.method = AMD_RESET_METHOD_NONE;
5472         reset_context.reset_req_dev = adev;
5473         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5474         set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5475
5476         adev->no_hw_access = true;
5477         r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5478         adev->no_hw_access = false;
5479         if (r)
5480                 goto out;
5481
5482         r = amdgpu_do_asic_reset(&device_list, &reset_context);
5483
5484 out:
5485         if (!r) {
5486                 if (amdgpu_device_cache_pci_state(adev->pdev))
5487                         pci_restore_state(adev->pdev);
5488
5489                 DRM_INFO("PCIe error recovery succeeded\n");
5490         } else {
5491                 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5492                 amdgpu_device_unlock_adev(adev);
5493         }
5494
5495         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5496 }
5497
5498 /**
5499  * amdgpu_pci_resume() - resume normal ops after PCI reset
5500  * @pdev: pointer to PCI device
5501  *
5502  * Called when the error recovery driver tells us that its
5503  * OK to resume normal operation.
5504  */
5505 void amdgpu_pci_resume(struct pci_dev *pdev)
5506 {
5507         struct drm_device *dev = pci_get_drvdata(pdev);
5508         struct amdgpu_device *adev = drm_to_adev(dev);
5509         int i;
5510
5512         DRM_INFO("PCI error: resume callback!!\n");
5513
5514         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5515                 struct amdgpu_ring *ring = adev->rings[i];
5516
5517                 if (!ring || !ring->sched.thread)
5518                         continue;
5519
5521                 drm_sched_resubmit_jobs(&ring->sched);
5522                 drm_sched_start(&ring->sched, true);
5523         }
5524
5525         amdgpu_device_unlock_adev(adev);
5526 }
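
/*
 * Illustrative sketch, not part of this file: the four callbacks above
 * are wired into the PCI core through a struct pci_error_handlers; the
 * real driver registers an equivalent table in amdgpu_drv.c.  A minimal
 * version would look like this.
 */
static const struct pci_error_handlers example_pci_err_handler __maybe_unused = {
        .error_detected = amdgpu_pci_error_detected,
        .mmio_enabled   = amdgpu_pci_mmio_enabled,
        .slot_reset     = amdgpu_pci_slot_reset,
        .resume         = amdgpu_pci_resume,
};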
5527
5528 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5529 {
5530         struct drm_device *dev = pci_get_drvdata(pdev);
5531         struct amdgpu_device *adev = drm_to_adev(dev);
5532         int r;
5533
5534         r = pci_save_state(pdev);
5535         if (!r) {
5536                 kfree(adev->pci_state);
5537
5538                 adev->pci_state = pci_store_saved_state(pdev);
5539
5540                 if (!adev->pci_state) {
5541                         DRM_ERROR("Failed to store PCI saved state");
5542                         return false;
5543                 }
5544         } else {
5545                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5546                 return false;
5547         }
5548
5549         return true;
5550 }
5551
5552 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5553 {
5554         struct drm_device *dev = pci_get_drvdata(pdev);
5555         struct amdgpu_device *adev = drm_to_adev(dev);
5556         int r;
5557
5558         if (!adev->pci_state)
5559                 return false;
5560
5561         r = pci_load_saved_state(pdev, adev->pci_state);
5562
5563         if (!r) {
5564                 pci_restore_state(pdev);
5565         } else {
5566                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5567                 return false;
5568         }
5569
5570         return true;
5571 }
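
/*
 * Illustrative sketch, not part of the driver: the save/restore pair
 * above is used as a bracket around a reset that clobbers PCI config
 * space, exactly as amdgpu_device_mode1_reset() does earlier in this
 * file.
 */
static void __maybe_unused example_reset_with_pci_state_preserved(struct amdgpu_device *adev)
{
        amdgpu_device_cache_pci_state(adev->pdev);

        /* ... ASIC reset that destroys PCI config space ... */

        amdgpu_device_load_pci_state(adev->pdev);
}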
5572
5573 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5574                 struct amdgpu_ring *ring)
5575 {
5576 #ifdef CONFIG_X86_64
5577         if (adev->flags & AMD_IS_APU)
5578                 return;
5579 #endif
5580         if (adev->gmc.xgmi.connected_to_cpu)
5581                 return;
5582
5583         if (ring && ring->funcs->emit_hdp_flush)
5584                 amdgpu_ring_emit_hdp_flush(ring);
5585         else
5586                 amdgpu_asic_flush_hdp(adev, ring);
5587 }
5588
5589 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5590                 struct amdgpu_ring *ring)
5591 {
5592 #ifdef CONFIG_X86_64
5593         if (adev->flags & AMD_IS_APU)
5594                 return;
5595 #endif
5596         if (adev->gmc.xgmi.connected_to_cpu)
5597                 return;
5598
5599         amdgpu_asic_invalidate_hdp(adev, ring);
5600 }
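
/*
 * Illustrative sketch, not part of the driver: HDP maintenance brackets
 * CPU access to VRAM through the host data path.  A flush pushes CPU
 * writes out so the GPU sees them; an invalidate drops stale read data
 * before the CPU reads back GPU results.  A NULL ring makes the helpers
 * fall back to MMIO instead of emitting ring packets.
 */
static void __maybe_unused example_hdp_bracketed_access(struct amdgpu_device *adev)
{
        /* ... CPU writes into a VRAM buffer ... */
        amdgpu_device_flush_hdp(adev, NULL);

        /* ... GPU consumes the buffer and writes results ... */

        amdgpu_device_invalidate_hdp(adev, NULL);
        /* ... the CPU can now read back fresh results ... */
}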