1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
35 #include <linux/devcoredump.h>
36 #include <generated/utsrelease.h>
37 #include <linux/pci-p2pdma.h>
38 #include <linux/apple-gmux.h>
39
40 #include <drm/drm_aperture.h>
41 #include <drm/drm_atomic_helper.h>
42 #include <drm/drm_crtc_helper.h>
43 #include <drm/drm_fb_helper.h>
44 #include <drm/drm_probe_helper.h>
45 #include <drm/amdgpu_drm.h>
46 #include <linux/vgaarb.h>
47 #include <linux/vga_switcheroo.h>
48 #include <linux/efi.h>
49 #include "amdgpu.h"
50 #include "amdgpu_trace.h"
51 #include "amdgpu_i2c.h"
52 #include "atom.h"
53 #include "amdgpu_atombios.h"
54 #include "amdgpu_atomfirmware.h"
55 #include "amd_pcie.h"
56 #ifdef CONFIG_DRM_AMDGPU_SI
57 #include "si.h"
58 #endif
59 #ifdef CONFIG_DRM_AMDGPU_CIK
60 #include "cik.h"
61 #endif
62 #include "vi.h"
63 #include "soc15.h"
64 #include "nv.h"
65 #include "bif/bif_4_1_d.h"
66 #include <linux/firmware.h>
67 #include "amdgpu_vf_error.h"
68
69 #include "amdgpu_amdkfd.h"
70 #include "amdgpu_pm.h"
71
72 #include "amdgpu_xgmi.h"
73 #include "amdgpu_ras.h"
74 #include "amdgpu_pmu.h"
75 #include "amdgpu_fru_eeprom.h"
76 #include "amdgpu_reset.h"
77
78 #include <linux/suspend.h>
79 #include <drm/task_barrier.h>
80 #include <linux/pm_runtime.h>
81
82 #include <drm/drm_drv.h>
83
84 #if IS_ENABLED(CONFIG_X86)
85 #include <asm/intel-family.h>
86 #endif
87
88 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
89 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
90 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
91 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
92 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
93 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
94 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
95
96 #define AMDGPU_RESUME_MS                2000
97 #define AMDGPU_MAX_RETRY_LIMIT          2
98 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
99
100 static const struct drm_driver amdgpu_kms_driver;
101
102 const char *amdgpu_asic_name[] = {
103         "TAHITI",
104         "PITCAIRN",
105         "VERDE",
106         "OLAND",
107         "HAINAN",
108         "BONAIRE",
109         "KAVERI",
110         "KABINI",
111         "HAWAII",
112         "MULLINS",
113         "TOPAZ",
114         "TONGA",
115         "FIJI",
116         "CARRIZO",
117         "STONEY",
118         "POLARIS10",
119         "POLARIS11",
120         "POLARIS12",
121         "VEGAM",
122         "VEGA10",
123         "VEGA12",
124         "VEGA20",
125         "RAVEN",
126         "ARCTURUS",
127         "RENOIR",
128         "ALDEBARAN",
129         "NAVI10",
130         "CYAN_SKILLFISH",
131         "NAVI14",
132         "NAVI12",
133         "SIENNA_CICHLID",
134         "NAVY_FLOUNDER",
135         "VANGOGH",
136         "DIMGREY_CAVEFISH",
137         "BEIGE_GOBY",
138         "YELLOW_CARP",
139         "IP DISCOVERY",
140         "LAST",
141 };
142
143 /**
144  * DOC: pcie_replay_count
145  *
146  * The amdgpu driver provides a sysfs API for reporting the total number
147  * of PCIe replays (NAKs).
148  * The file pcie_replay_count is used for this and returns the total
149  * number of replays as a sum of the NAKs generated and NAKs received.
150  */
151
152 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
153                 struct device_attribute *attr, char *buf)
154 {
155         struct drm_device *ddev = dev_get_drvdata(dev);
156         struct amdgpu_device *adev = drm_to_adev(ddev);
157         uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
158
159         return sysfs_emit(buf, "%llu\n", cnt);
160 }
161
162 static DEVICE_ATTR(pcie_replay_count, 0444,
163                 amdgpu_device_get_pcie_replay_count, NULL);
164
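
/*
 * Illustrative sketch (not part of the driver build): how a userspace tool
 * might read the pcie_replay_count attribute created above.  The sysfs path
 * below is an assumption based on the usual DRM card layout; adjust the card
 * index for the system at hand.
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

static unsigned long long example_read_pcie_replay_count(void)
{
	/* hypothetical path for card0 */
	FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");
	unsigned long long cnt = 0;

	if (f) {
		if (fscanf(f, "%llu", &cnt) != 1)
			cnt = 0;
		fclose(f);
	}
	return cnt;
}
#endif
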
165 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
166
167
168 /**
169  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
170  *
171  * @dev: drm_device pointer
172  *
173  * Returns true if the device is a dGPU with ATPX power control,
174  * otherwise return false.
175  */
176 bool amdgpu_device_supports_px(struct drm_device *dev)
177 {
178         struct amdgpu_device *adev = drm_to_adev(dev);
179
180         if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
181                 return true;
182         return false;
183 }
184
185 /**
186  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
187  *
188  * @dev: drm_device pointer
189  *
190  * Returns true if the device is a dGPU with ACPI power control,
191  * otherwise return false.
192  */
193 bool amdgpu_device_supports_boco(struct drm_device *dev)
194 {
195         struct amdgpu_device *adev = drm_to_adev(dev);
196
197         if (adev->has_pr3 ||
198             ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
199                 return true;
200         return false;
201 }
202
203 /**
204  * amdgpu_device_supports_baco - Does the device support BACO
205  *
206  * @dev: drm_device pointer
207  *
208  * Returns true if the device supports BACO,
209  * otherwise return false.
210  */
211 bool amdgpu_device_supports_baco(struct drm_device *dev)
212 {
213         struct amdgpu_device *adev = drm_to_adev(dev);
214
215         return amdgpu_asic_supports_baco(adev);
216 }
217
218 /**
219  * amdgpu_device_supports_smart_shift - Is the device a dGPU with
220  * Smart Shift support
221  *
222  * @dev: drm_device pointer
223  *
224  * Returns true if the device is a dGPU with Smart Shift support,
225  * otherwise returns false.
226  */
227 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
228 {
229         return (amdgpu_device_supports_boco(dev) &&
230                 amdgpu_acpi_is_power_shift_control_supported());
231 }
232
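
/*
 * Illustrative sketch: how a caller might use the helpers above to pick a
 * runtime power-off method.  The example_pick_runpm_mode() helper and the
 * numeric return codes are hypothetical, for illustration only.
 */
#if 0	/* example only */
static int example_pick_runpm_mode(struct drm_device *dev)
{
	if (amdgpu_device_supports_px(dev))
		return 1;	/* ATPX/PX based power control */
	if (amdgpu_device_supports_boco(dev))
		return 2;	/* ACPI power resources (BOCO) */
	if (amdgpu_device_supports_baco(dev))
		return 3;	/* BACO, handled by the ASIC itself */
	return 0;		/* no runtime power-off support */
}
#endif
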
233 /*
234  * VRAM access helper functions
235  */
236
237 /**
238  * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
239  *
240  * @adev: amdgpu_device pointer
241  * @pos: offset of the buffer in vram
242  * @buf: virtual address of the buffer in system memory
243  * @size: read/write size; the buffer at @buf must be at least @size bytes
244  * @write: true - write to vram, otherwise - read from vram
245  */
246 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
247                              void *buf, size_t size, bool write)
248 {
249         unsigned long flags;
250         uint32_t hi = ~0, tmp = 0;
251         uint32_t *data = buf;
252         uint64_t last;
253         int idx;
254
255         if (!drm_dev_enter(adev_to_drm(adev), &idx))
256                 return;
257
258         BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
259
260         spin_lock_irqsave(&adev->mmio_idx_lock, flags);
261         for (last = pos + size; pos < last; pos += 4) {
262                 tmp = pos >> 31;
263
264                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
265                 if (tmp != hi) {
266                         WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
267                         hi = tmp;
268                 }
269                 if (write)
270                         WREG32_NO_KIQ(mmMM_DATA, *data++);
271                 else
272                         *data++ = RREG32_NO_KIQ(mmMM_DATA);
273         }
274
275         spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
276         drm_dev_exit(idx);
277 }
278
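
/*
 * Illustrative sketch: reading a single dword from VRAM through the
 * MM_INDEX/MM_DATA path above.  The offset and calling context are
 * assumptions for the example.
 */
#if 0	/* example only */
static uint32_t example_read_vram_dword(struct amdgpu_device *adev, loff_t pos)
{
	uint32_t val = 0;

	/* pos and size must be dword aligned, see the BUG_ON() above */
	amdgpu_device_mm_access(adev, pos, &val, sizeof(val), false);
	return val;
}
#endif
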
279 /**
280  * amdgpu_device_aper_access - access vram by the vram aperture
281  *
282  * @adev: amdgpu_device pointer
283  * @pos: offset of the buffer in vram
284  * @buf: virtual address of the buffer in system memory
285  * @size: read/write size; the buffer at @buf must be at least @size bytes
286  * @write: true - write to vram, otherwise - read from vram
287  *
288  * Returns the number of bytes transferred.
289  */
290 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
291                                  void *buf, size_t size, bool write)
292 {
293 #ifdef CONFIG_64BIT
294         void __iomem *addr;
295         size_t count = 0;
296         uint64_t last;
297
298         if (!adev->mman.aper_base_kaddr)
299                 return 0;
300
301         last = min(pos + size, adev->gmc.visible_vram_size);
302         if (last > pos) {
303                 addr = adev->mman.aper_base_kaddr + pos;
304                 count = last - pos;
305
306                 if (write) {
307                         memcpy_toio(addr, buf, count);
308                         /* Make sure HDP write cache flush happens without any reordering
309                          * after the system memory contents are sent over PCIe to the device
310                          */
311                         mb();
312                         amdgpu_device_flush_hdp(adev, NULL);
313                 } else {
314                         amdgpu_device_invalidate_hdp(adev, NULL);
315                         /* Make sure HDP read cache is invalidated before issuing a read
316                          * to the PCIe device
317                          */
318                         mb();
319                         memcpy_fromio(buf, addr, count);
320                 }
321
322         }
323
324         return count;
325 #else
326         return 0;
327 #endif
328 }
329
330 /**
331  * amdgpu_device_vram_access - read/write a buffer in vram
332  *
333  * @adev: amdgpu_device pointer
334  * @pos: offset of the buffer in vram
335  * @buf: virtual address of the buffer in system memory
336  * @size: read/write size; the buffer at @buf must be at least @size bytes
337  * @write: true - write to vram, otherwise - read from vram
338  */
339 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
340                                void *buf, size_t size, bool write)
341 {
342         size_t count;
343
344         /* try using the vram aperture to access vram first */
345         count = amdgpu_device_aper_access(adev, pos, buf, size, write);
346         size -= count;
347         if (size) {
348                 /* use MM_INDEX/MM_DATA to access the rest of vram */
349                 pos += count;
350                 buf += count;
351                 amdgpu_device_mm_access(adev, pos, buf, size, write);
352         }
353 }
354
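
/*
 * Illustrative sketch: copying a small buffer into VRAM with the combined
 * helper above, which uses the BAR aperture when possible and falls back to
 * MM_INDEX/MM_DATA for the remainder.  The offset and pattern are
 * placeholders.
 */
#if 0	/* example only */
static void example_write_vram_buffer(struct amdgpu_device *adev)
{
	uint32_t pattern[4] = { 0xdeadbeef, 0xcafebabe, 0x0, 0xffffffff };

	/* write 16 bytes at VRAM offset 0x1000 (dword aligned) */
	amdgpu_device_vram_access(adev, 0x1000, pattern, sizeof(pattern), true);
}
#endif
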
355 /*
356  * register access helper functions.
357  */
358
359 /* Check if hw access should be skipped because of hotplug or device error */
360 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
361 {
362         if (adev->no_hw_access)
363                 return true;
364
365 #ifdef CONFIG_LOCKDEP
366         /*
367          * This is a bit complicated to understand, so worth a comment. What we assert
368          * here is that the GPU reset is not running on another thread in parallel.
369          *
370          * For this we trylock the read side of the reset semaphore; if that succeeds
371          * we know that the reset is not running in parallel.
372          *
373          * If the trylock fails we assert that we are either already holding the read
374          * side of the lock or are the reset thread itself and hold the write side of
375          * the lock.
376          */
377         if (in_task()) {
378                 if (down_read_trylock(&adev->reset_domain->sem))
379                         up_read(&adev->reset_domain->sem);
380                 else
381                         lockdep_assert_held(&adev->reset_domain->sem);
382         }
383 #endif
384         return false;
385 }
386
387 /**
388  * amdgpu_device_rreg - read a memory mapped IO or indirect register
389  *
390  * @adev: amdgpu_device pointer
391  * @reg: dword aligned register offset
392  * @acc_flags: access flags which require special behavior
393  *
394  * Returns the 32 bit value from the offset specified.
395  */
396 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
397                             uint32_t reg, uint32_t acc_flags)
398 {
399         uint32_t ret;
400
401         if (amdgpu_device_skip_hw_access(adev))
402                 return 0;
403
404         if ((reg * 4) < adev->rmmio_size) {
405                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
406                     amdgpu_sriov_runtime(adev) &&
407                     down_read_trylock(&adev->reset_domain->sem)) {
408                         ret = amdgpu_kiq_rreg(adev, reg);
409                         up_read(&adev->reset_domain->sem);
410                 } else {
411                         ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
412                 }
413         } else {
414                 ret = adev->pcie_rreg(adev, reg * 4);
415         }
416
417         trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
418
419         return ret;
420 }
421
422 /*
423  * MMIO register read with bytes helper functions
424  * @offset: byte offset from MMIO start
425  */
426
427 /**
428  * amdgpu_mm_rreg8 - read a memory mapped IO register
429  *
430  * @adev: amdgpu_device pointer
431  * @offset: byte aligned register offset
432  *
433  * Returns the 8 bit value from the offset specified.
434  */
435 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
436 {
437         if (amdgpu_device_skip_hw_access(adev))
438                 return 0;
439
440         if (offset < adev->rmmio_size)
441                 return (readb(adev->rmmio + offset));
442         BUG();
443 }
444
445 /*
446  * MMIO register write with bytes helper functions
447  * @offset: byte offset from MMIO start
448  * @value: the value to be written to the register
449  */
450
451 /**
452  * amdgpu_mm_wreg8 - write a memory mapped IO register
453  *
454  * @adev: amdgpu_device pointer
455  * @offset: byte aligned register offset
456  * @value: 8 bit value to write
457  *
458  * Writes the value specified to the offset specified.
459  */
460 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
461 {
462         if (amdgpu_device_skip_hw_access(adev))
463                 return;
464
465         if (offset < adev->rmmio_size)
466                 writeb(value, adev->rmmio + offset);
467         else
468                 BUG();
469 }
470
471 /**
472  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
473  *
474  * @adev: amdgpu_device pointer
475  * @reg: dword aligned register offset
476  * @v: 32 bit value to write to the register
477  * @acc_flags: access flags which require special behavior
478  *
479  * Writes the value specified to the offset specified.
480  */
481 void amdgpu_device_wreg(struct amdgpu_device *adev,
482                         uint32_t reg, uint32_t v,
483                         uint32_t acc_flags)
484 {
485         if (amdgpu_device_skip_hw_access(adev))
486                 return;
487
488         if ((reg * 4) < adev->rmmio_size) {
489                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
490                     amdgpu_sriov_runtime(adev) &&
491                     down_read_trylock(&adev->reset_domain->sem)) {
492                         amdgpu_kiq_wreg(adev, reg, v);
493                         up_read(&adev->reset_domain->sem);
494                 } else {
495                         writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
496                 }
497         } else {
498                 adev->pcie_wreg(adev, reg * 4, v);
499         }
500
501         trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
502 }
503
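
/*
 * Illustrative sketch: a read-modify-write of a register using the helpers
 * above.  The register offset and bit mask are placeholders; real driver
 * code would normally go through the RREG32/WREG32 macros instead of calling
 * these functions directly.
 */
#if 0	/* example only */
static void example_rmw_register(struct amdgpu_device *adev, uint32_t reg)
{
	uint32_t val;

	val = amdgpu_device_rreg(adev, reg, 0);
	val |= 0x1;	/* set a hypothetical enable bit */
	amdgpu_device_wreg(adev, reg, val, 0);
}
#endif
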
504 /**
505  * amdgpu_mm_wreg_mmio_rlc - write a register either with direct/indirect mmio or via the RLC path if in range
506  *
507  * @adev: amdgpu_device pointer
508  * @reg: mmio/rlc register
509  * @v: value to write
510  * @xcc_id: xcc accelerated compute core id
511  *
512  * This function is invoked only for debugfs register access.
513  */
514 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
515                              uint32_t reg, uint32_t v,
516                              uint32_t xcc_id)
517 {
518         if (amdgpu_device_skip_hw_access(adev))
519                 return;
520
521         if (amdgpu_sriov_fullaccess(adev) &&
522             adev->gfx.rlc.funcs &&
523             adev->gfx.rlc.funcs->is_rlcg_access_range) {
524                 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
525                         return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
526         } else if ((reg * 4) >= adev->rmmio_size) {
527                 adev->pcie_wreg(adev, reg * 4, v);
528         } else {
529                 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
530         }
531 }
532
533 /**
534  * amdgpu_device_indirect_rreg - read an indirect register
535  *
536  * @adev: amdgpu_device pointer
537  * @reg_addr: indirect register address to read from
538  *
539  * Returns the value of indirect register @reg_addr
540  */
541 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
542                                 u32 reg_addr)
543 {
544         unsigned long flags, pcie_index, pcie_data;
545         void __iomem *pcie_index_offset;
546         void __iomem *pcie_data_offset;
547         u32 r;
548
549         pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
550         pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
551
552         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
553         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
554         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
555
556         writel(reg_addr, pcie_index_offset);
557         readl(pcie_index_offset);
558         r = readl(pcie_data_offset);
559         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
560
561         return r;
562 }
563
564 u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
565                                     u64 reg_addr)
566 {
567         unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
568         u32 r;
569         void __iomem *pcie_index_offset;
570         void __iomem *pcie_index_hi_offset;
571         void __iomem *pcie_data_offset;
572
573         pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
574         pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
575         if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
576                 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
577         else
578                 pcie_index_hi = 0;
579
580         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
581         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
582         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
583         if (pcie_index_hi != 0)
584                 pcie_index_hi_offset = (void __iomem *)adev->rmmio +
585                                 pcie_index_hi * 4;
586
587         writel(reg_addr, pcie_index_offset);
588         readl(pcie_index_offset);
589         if (pcie_index_hi != 0) {
590                 writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
591                 readl(pcie_index_hi_offset);
592         }
593         r = readl(pcie_data_offset);
594
595         /* clear the high bits */
596         if (pcie_index_hi != 0) {
597                 writel(0, pcie_index_hi_offset);
598                 readl(pcie_index_hi_offset);
599         }
600
601         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
602
603         return r;
604 }
605
606 /**
607  * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
608  *
609  * @adev: amdgpu_device pointer
610  * @reg_addr: indirect register address to read from
611  *
612  * Returns the value of indirect register @reg_addr
613  */
614 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
615                                   u32 reg_addr)
616 {
617         unsigned long flags, pcie_index, pcie_data;
618         void __iomem *pcie_index_offset;
619         void __iomem *pcie_data_offset;
620         u64 r;
621
622         pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
623         pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
624
625         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
626         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
627         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
628
629         /* read low 32 bits */
630         writel(reg_addr, pcie_index_offset);
631         readl(pcie_index_offset);
632         r = readl(pcie_data_offset);
633         /* read high 32 bits */
634         writel(reg_addr + 4, pcie_index_offset);
635         readl(pcie_index_offset);
636         r |= ((u64)readl(pcie_data_offset) << 32);
637         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
638
639         return r;
640 }
641
642 u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
643                                   u64 reg_addr)
644 {
645         unsigned long flags, pcie_index, pcie_data;
646         unsigned long pcie_index_hi = 0;
647         void __iomem *pcie_index_offset;
648         void __iomem *pcie_index_hi_offset;
649         void __iomem *pcie_data_offset;
650         u64 r;
651
652         pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
653         pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
654         if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
655                 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
656
657         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
658         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
659         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
660         if (pcie_index_hi != 0)
661                 pcie_index_hi_offset = (void __iomem *)adev->rmmio +
662                         pcie_index_hi * 4;
663
664         /* read low 32 bits */
665         writel(reg_addr, pcie_index_offset);
666         readl(pcie_index_offset);
667         if (pcie_index_hi != 0) {
668                 writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
669                 readl(pcie_index_hi_offset);
670         }
671         r = readl(pcie_data_offset);
672         /* read high 32 bits */
673         writel(reg_addr + 4, pcie_index_offset);
674         readl(pcie_index_offset);
675         if (pcie_index_hi != 0) {
676                 writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
677                 readl(pcie_index_hi_offset);
678         }
679         r |= ((u64)readl(pcie_data_offset) << 32);
680
681         /* clear the high bits */
682         if (pcie_index_hi != 0) {
683                 writel(0, pcie_index_hi_offset);
684                 readl(pcie_index_hi_offset);
685         }
686
687         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
688
689         return r;
690 }
691
692 /**
693  * amdgpu_device_indirect_wreg - write an indirect register
694  *
695  * @adev: amdgpu_device pointer
696  * @reg_addr: indirect register offset
697  * @reg_data: indirect register data
698  *
699  */
700 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
701                                  u32 reg_addr, u32 reg_data)
702 {
703         unsigned long flags, pcie_index, pcie_data;
704         void __iomem *pcie_index_offset;
705         void __iomem *pcie_data_offset;
706
707         pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
708         pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
709
710         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
711         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
712         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
713
714         writel(reg_addr, pcie_index_offset);
715         readl(pcie_index_offset);
716         writel(reg_data, pcie_data_offset);
717         readl(pcie_data_offset);
718         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
719 }
720
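
/*
 * Illustrative sketch: a read-modify-write of a register that lives behind
 * the PCIE index/data pair, using the indirect helpers above.  The register
 * address and bit mask are placeholders.
 */
#if 0	/* example only */
static void example_indirect_rmw(struct amdgpu_device *adev, u32 reg_addr)
{
	u32 val;

	val = amdgpu_device_indirect_rreg(adev, reg_addr);
	val &= ~0x2;	/* clear a hypothetical bit */
	amdgpu_device_indirect_wreg(adev, reg_addr, val);
}
#endif
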
721 void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
722                                      u64 reg_addr, u32 reg_data)
723 {
724         unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
725         void __iomem *pcie_index_offset;
726         void __iomem *pcie_index_hi_offset;
727         void __iomem *pcie_data_offset;
728
729         pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
730         pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
731         if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
732                 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
733         else
734                 pcie_index_hi = 0;
735
736         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
737         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
738         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
739         if (pcie_index_hi != 0)
740                 pcie_index_hi_offset = (void __iomem *)adev->rmmio +
741                                 pcie_index_hi * 4;
742
743         writel(reg_addr, pcie_index_offset);
744         readl(pcie_index_offset);
745         if (pcie_index_hi != 0) {
746                 writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
747                 readl(pcie_index_hi_offset);
748         }
749         writel(reg_data, pcie_data_offset);
750         readl(pcie_data_offset);
751
752         /* clear the high bits */
753         if (pcie_index_hi != 0) {
754                 writel(0, pcie_index_hi_offset);
755                 readl(pcie_index_hi_offset);
756         }
757
758         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
759 }
760
761 /**
762  * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register
763  *
764  * @adev: amdgpu_device pointer
765  * @reg_addr: indirect register offset
766  * @reg_data: indirect register data
767  *
768  */
769 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
770                                    u32 reg_addr, u64 reg_data)
771 {
772         unsigned long flags, pcie_index, pcie_data;
773         void __iomem *pcie_index_offset;
774         void __iomem *pcie_data_offset;
775
776         pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
777         pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
778
779         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
780         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
781         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
782
783         /* write low 32 bits */
784         writel(reg_addr, pcie_index_offset);
785         readl(pcie_index_offset);
786         writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
787         readl(pcie_data_offset);
788         /* write high 32 bits */
789         writel(reg_addr + 4, pcie_index_offset);
790         readl(pcie_index_offset);
791         writel((u32)(reg_data >> 32), pcie_data_offset);
792         readl(pcie_data_offset);
793         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
794 }
795
796 void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
797                                    u64 reg_addr, u64 reg_data)
798 {
799         unsigned long flags, pcie_index, pcie_data;
800         unsigned long pcie_index_hi = 0;
801         void __iomem *pcie_index_offset;
802         void __iomem *pcie_index_hi_offset;
803         void __iomem *pcie_data_offset;
804
805         pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
806         pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
807         if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
808                 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
809
810         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
811         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
812         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
813         if (pcie_index_hi != 0)
814                 pcie_index_hi_offset = (void __iomem *)adev->rmmio +
815                                 pcie_index_hi * 4;
816
817         /* write low 32 bits */
818         writel(reg_addr, pcie_index_offset);
819         readl(pcie_index_offset);
820         if (pcie_index_hi != 0) {
821                 writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
822                 readl(pcie_index_hi_offset);
823         }
824         writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
825         readl(pcie_data_offset);
826         /* write high 32 bits */
827         writel(reg_addr + 4, pcie_index_offset);
828         readl(pcie_index_offset);
829         if (pcie_index_hi != 0) {
830                 writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
831                 readl(pcie_index_hi_offset);
832         }
833         writel((u32)(reg_data >> 32), pcie_data_offset);
834         readl(pcie_data_offset);
835
836         /* clear the high bits */
837         if (pcie_index_hi != 0) {
838                 writel(0, pcie_index_hi_offset);
839                 readl(pcie_index_hi_offset);
840         }
841
842         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
843 }
844
845 /**
846  * amdgpu_device_get_rev_id - query device rev_id
847  *
848  * @adev: amdgpu_device pointer
849  *
850  * Return device rev_id
851  */
852 u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
853 {
854         return adev->nbio.funcs->get_rev_id(adev);
855 }
856
857 /**
858  * amdgpu_invalid_rreg - dummy reg read function
859  *
860  * @adev: amdgpu_device pointer
861  * @reg: offset of register
862  *
863  * Dummy register read function.  Used for register blocks
864  * that certain asics don't have (all asics).
865  * Returns the value in the register.
866  */
867 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
868 {
869         DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
870         BUG();
871         return 0;
872 }
873
874 static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
875 {
876         DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
877         BUG();
878         return 0;
879 }
880
881 /**
882  * amdgpu_invalid_wreg - dummy reg write function
883  *
884  * @adev: amdgpu_device pointer
885  * @reg: offset of register
886  * @v: value to write to the register
887  *
888  * Dummy register write function.  Used for register blocks
889  * that certain asics don't have (all asics).
890  */
891 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
892 {
893         DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
894                   reg, v);
895         BUG();
896 }
897
898 static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
899 {
900         DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
901                   reg, v);
902         BUG();
903 }
904
905 /**
906  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
907  *
908  * @adev: amdgpu_device pointer
909  * @reg: offset of register
910  *
911  * Dummy register read function.  Used for register blocks
912  * that certain asics don't have (all asics).
913  * Returns the value in the register.
914  */
915 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
916 {
917         DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
918         BUG();
919         return 0;
920 }
921
922 static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
923 {
924         DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
925         BUG();
926         return 0;
927 }
928
929 /**
930  * amdgpu_invalid_wreg64 - dummy reg write function
931  *
932  * @adev: amdgpu_device pointer
933  * @reg: offset of register
934  * @v: value to write to the register
935  *
936  * Dummy register write function.  Used for register blocks
937  * that certain asics don't have (all asics).
938  */
939 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
940 {
941         DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
942                   reg, v);
943         BUG();
944 }
945
946 static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
947 {
948         DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
949                   reg, v);
950         BUG();
951 }
952
953 /**
954  * amdgpu_block_invalid_rreg - dummy reg read function
955  *
956  * @adev: amdgpu_device pointer
957  * @block: offset of instance
958  * @reg: offset of register
959  *
960  * Dummy register read function.  Used for register blocks
961  * that certain asics don't have (all asics).
962  * Returns the value in the register.
963  */
964 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
965                                           uint32_t block, uint32_t reg)
966 {
967         DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
968                   reg, block);
969         BUG();
970         return 0;
971 }
972
973 /**
974  * amdgpu_block_invalid_wreg - dummy reg write function
975  *
976  * @adev: amdgpu_device pointer
977  * @block: offset of instance
978  * @reg: offset of register
979  * @v: value to write to the register
980  *
981  * Dummy register write function.  Used for register blocks
982  * that certain asics don't have (all asics).
983  */
984 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
985                                       uint32_t block,
986                                       uint32_t reg, uint32_t v)
987 {
988         DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
989                   reg, block, v);
990         BUG();
991 }
992
993 /**
994  * amdgpu_device_asic_init - Wrapper for atom asic_init
995  *
996  * @adev: amdgpu_device pointer
997  *
998  * Does any asic specific work and then calls atom asic init.
999  */
1000 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
1001 {
1002         int ret;
1003
1004         amdgpu_asic_pre_asic_init(adev);
1005
1006         if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) ||
1007             adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) {
1008                 amdgpu_psp_wait_for_bootloader(adev);
1009                 ret = amdgpu_atomfirmware_asic_init(adev, true);
1010                 return ret;
1011         } else {
1012                 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
1013         }
1014
1015         return 0;
1016 }
1017
1018 /**
1019  * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
1020  *
1021  * @adev: amdgpu_device pointer
1022  *
1023  * Allocates a scratch page of VRAM for use by various things in the
1024  * driver.
1025  */
1026 static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
1027 {
1028         return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
1029                                        AMDGPU_GEM_DOMAIN_VRAM |
1030                                        AMDGPU_GEM_DOMAIN_GTT,
1031                                        &adev->mem_scratch.robj,
1032                                        &adev->mem_scratch.gpu_addr,
1033                                        (void **)&adev->mem_scratch.ptr);
1034 }
1035
1036 /**
1037  * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
1038  *
1039  * @adev: amdgpu_device pointer
1040  *
1041  * Frees the VRAM scratch page.
1042  */
1043 static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
1044 {
1045         amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
1046 }
1047
1048 /**
1049  * amdgpu_device_program_register_sequence - program an array of registers.
1050  *
1051  * @adev: amdgpu_device pointer
1052  * @registers: pointer to the register array
1053  * @array_size: size of the register array
1054  *
1055  * Programs an array of registers with and/or masks.
1056  * This is a helper for setting golden registers.
1057  */
1058 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
1059                                              const u32 *registers,
1060                                              const u32 array_size)
1061 {
1062         u32 tmp, reg, and_mask, or_mask;
1063         int i;
1064
1065         if (array_size % 3)
1066                 return;
1067
1068         for (i = 0; i < array_size; i += 3) {
1069                 reg = registers[i + 0];
1070                 and_mask = registers[i + 1];
1071                 or_mask = registers[i + 2];
1072
1073                 if (and_mask == 0xffffffff) {
1074                         tmp = or_mask;
1075                 } else {
1076                         tmp = RREG32(reg);
1077                         tmp &= ~and_mask;
1078                         if (adev->family >= AMDGPU_FAMILY_AI)
1079                                 tmp |= (or_mask & and_mask);
1080                         else
1081                                 tmp |= or_mask;
1082                 }
1083                 WREG32(reg, tmp);
1084         }
1085 }
1086
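
/*
 * Illustrative sketch: the register array consumed above is a flat list of
 * {offset, and_mask, or_mask} triplets.  The offsets and masks below are
 * placeholders, not real golden settings.
 */
#if 0	/* example only */
static const u32 example_golden_settings[] = {
	/* offset	and_mask	or_mask */
	0x0001,		0xffffffff,	0x00000100,	/* full overwrite */
	0x0002,		0x0000000f,	0x00000003,	/* masked update  */
};

static void example_apply_golden(struct amdgpu_device *adev)
{
	amdgpu_device_program_register_sequence(adev, example_golden_settings,
						ARRAY_SIZE(example_golden_settings));
}
#endif
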
1087 /**
1088  * amdgpu_device_pci_config_reset - reset the GPU
1089  *
1090  * @adev: amdgpu_device pointer
1091  *
1092  * Resets the GPU using the pci config reset sequence.
1093  * Only applicable to asics prior to vega10.
1094  */
1095 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1096 {
1097         pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1098 }
1099
1100 /**
1101  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1102  *
1103  * @adev: amdgpu_device pointer
1104  *
1105  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1106  */
1107 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1108 {
1109         return pci_reset_function(adev->pdev);
1110 }
1111
1112 /*
1113  * amdgpu_device_wb_*()
1114  * Writeback is the method by which the GPU updates special pages in memory
1115  * with the status of certain GPU events (fences, ring pointers, etc.).
1116  */
1117
1118 /**
1119  * amdgpu_device_wb_fini - Disable Writeback and free memory
1120  *
1121  * @adev: amdgpu_device pointer
1122  *
1123  * Disables Writeback and frees the Writeback memory (all asics).
1124  * Used at driver shutdown.
1125  */
1126 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1127 {
1128         if (adev->wb.wb_obj) {
1129                 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1130                                       &adev->wb.gpu_addr,
1131                                       (void **)&adev->wb.wb);
1132                 adev->wb.wb_obj = NULL;
1133         }
1134 }
1135
1136 /**
1137  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1138  *
1139  * @adev: amdgpu_device pointer
1140  *
1141  * Initializes writeback and allocates writeback memory (all asics).
1142  * Used at driver startup.
1143  * Returns 0 on success or a negative error code on failure.
1144  */
1145 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1146 {
1147         int r;
1148
1149         if (adev->wb.wb_obj == NULL) {
1150                 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1151                 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1152                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1153                                             &adev->wb.wb_obj, &adev->wb.gpu_addr,
1154                                             (void **)&adev->wb.wb);
1155                 if (r) {
1156                         dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1157                         return r;
1158                 }
1159
1160                 adev->wb.num_wb = AMDGPU_MAX_WB;
1161                 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1162
1163                 /* clear wb memory */
1164                 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1165         }
1166
1167         return 0;
1168 }
1169
1170 /**
1171  * amdgpu_device_wb_get - Allocate a wb entry
1172  *
1173  * @adev: amdgpu_device pointer
1174  * @wb: wb index
1175  *
1176  * Allocate a wb slot for use by the driver (all asics).
1177  * Returns 0 on success or -EINVAL on failure.
1178  */
1179 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1180 {
1181         unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1182
1183         if (offset < adev->wb.num_wb) {
1184                 __set_bit(offset, adev->wb.used);
1185                 *wb = offset << 3; /* convert to dw offset */
1186                 return 0;
1187         } else {
1188                 return -EINVAL;
1189         }
1190 }
1191
1192 /**
1193  * amdgpu_device_wb_free - Free a wb entry
1194  *
1195  * @adev: amdgpu_device pointer
1196  * @wb: wb index
1197  *
1198  * Free a wb slot allocated for use by the driver (all asics)
1199  */
1200 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1201 {
1202         wb >>= 3;
1203         if (wb < adev->wb.num_wb)
1204                 __clear_bit(wb, adev->wb.used);
1205 }
1206
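
/*
 * Illustrative sketch: allocating a writeback slot, touching its CPU view,
 * and releasing it again.  This mirrors how ring code typically uses these
 * helpers; the surrounding error handling is trimmed for brevity.
 */
#if 0	/* example only */
static int example_use_wb_slot(struct amdgpu_device *adev)
{
	u32 wb;		/* dword offset into the writeback buffer */
	int r;

	r = amdgpu_device_wb_get(adev, &wb);
	if (r)
		return r;

	/* CPU view of the slot; the GPU address would be
	 * adev->wb.gpu_addr + wb * 4
	 */
	adev->wb.wb[wb] = 0;

	amdgpu_device_wb_free(adev, wb);
	return 0;
}
#endif
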
1207 /**
1208  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1209  *
1210  * @adev: amdgpu_device pointer
1211  *
1212  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1213  * to fail, but if any of the BARs is not accessible after the resize we abort
1214  * driver loading by returning -ENODEV.
1215  */
1216 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1217 {
1218         int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1219         struct pci_bus *root;
1220         struct resource *res;
1221         unsigned int i;
1222         u16 cmd;
1223         int r;
1224
1225         if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
1226                 return 0;
1227
1228         /* Bypass for VF */
1229         if (amdgpu_sriov_vf(adev))
1230                 return 0;
1231
1232         /* skip if the bios has already enabled large BAR */
1233         if (adev->gmc.real_vram_size &&
1234             (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1235                 return 0;
1236
1237         /* Check if the root BUS has 64bit memory resources */
1238         root = adev->pdev->bus;
1239         while (root->parent)
1240                 root = root->parent;
1241
1242         pci_bus_for_each_resource(root, res, i) {
1243                 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1244                     res->start > 0x100000000ull)
1245                         break;
1246         }
1247
1248         /* Trying to resize is pointless without a root hub window above 4GB */
1249         if (!res)
1250                 return 0;
1251
1252         /* Limit the BAR size to what is available */
1253         rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1254                         rbar_size);
1255
1256         /* Disable memory decoding while we change the BAR addresses and size */
1257         pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1258         pci_write_config_word(adev->pdev, PCI_COMMAND,
1259                               cmd & ~PCI_COMMAND_MEMORY);
1260
1261         /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1262         amdgpu_doorbell_fini(adev);
1263         if (adev->asic_type >= CHIP_BONAIRE)
1264                 pci_release_resource(adev->pdev, 2);
1265
1266         pci_release_resource(adev->pdev, 0);
1267
1268         r = pci_resize_resource(adev->pdev, 0, rbar_size);
1269         if (r == -ENOSPC)
1270                 DRM_INFO("Not enough PCI address space for a large BAR.");
1271         else if (r && r != -ENOTSUPP)
1272                 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1273
1274         pci_assign_unassigned_bus_resources(adev->pdev->bus);
1275
1276         /* When the doorbell or fb BAR isn't available we have no chance of
1277          * using the device.
1278          */
1279         r = amdgpu_doorbell_init(adev);
1280         if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1281                 return -ENODEV;
1282
1283         pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1284
1285         return 0;
1286 }
1287
1288 static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
1289 {
1290         if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
1291                 return false;
1292
1293         return true;
1294 }
1295
1296 /*
1297  * GPU helpers function.
1298  */
1299 /**
1300  * amdgpu_device_need_post - check if the hw needs post or not
1301  *
1302  * @adev: amdgpu_device pointer
1303  *
1304  * Check if the asic has been initialized (all asics) at driver startup,
1305  * or if post is needed because a hw reset was performed.
1306  * Returns true if need or false if not.
1307  */
1308 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1309 {
1310         uint32_t reg;
1311
1312         if (amdgpu_sriov_vf(adev))
1313                 return false;
1314
1315         if (!amdgpu_device_read_bios(adev))
1316                 return false;
1317
1318         if (amdgpu_passthrough(adev)) {
1319                 /* For FIJI: In the whole-GPU pass-through virtualization case, after a VM
1320                  * reboot some old SMC firmware still needs the driver to do vPost, otherwise
1321                  * the GPU hangs. SMC firmware versions above 22.15 don't have this flaw, so
1322                  * force vPost for SMC versions below 22.15
1323                  */
1324                 if (adev->asic_type == CHIP_FIJI) {
1325                         int err;
1326                         uint32_t fw_ver;
1327
1328                         err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1329                         /* force vPost if an error occurred */
1330                         if (err)
1331                                 return true;
1332
1333                         fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1334                         if (fw_ver < 0x00160e00)
1335                                 return true;
1336                 }
1337         }
1338
1339         /* Don't post if we need to reset whole hive on init */
1340         if (adev->gmc.xgmi.pending_reset)
1341                 return false;
1342
1343         if (adev->has_hw_reset) {
1344                 adev->has_hw_reset = false;
1345                 return true;
1346         }
1347
1348         /* bios scratch used on CIK+ */
1349         if (adev->asic_type >= CHIP_BONAIRE)
1350                 return amdgpu_atombios_scratch_need_asic_init(adev);
1351
1352         /* check MEM_SIZE for older asics */
1353         reg = amdgpu_asic_get_config_memsize(adev);
1354
1355         if ((reg != 0) && (reg != 0xffffffff))
1356                 return false;
1357
1358         return true;
1359 }
1360
1361 /*
1362  * On APUs with >= 64GB white flickering has been observed w/ SG enabled.
1363  * Disable S/G on such systems until we have a proper fix.
1364  * https://gitlab.freedesktop.org/drm/amd/-/issues/2354
1365  * https://gitlab.freedesktop.org/drm/amd/-/issues/2735
1366  */
1367 bool amdgpu_sg_display_supported(struct amdgpu_device *adev)
1368 {
1369         switch (amdgpu_sg_display) {
1370         case -1:
1371                 break;
1372         case 0:
1373                 return false;
1374         case 1:
1375                 return true;
1376         default:
1377                 return false;
1378         }
1379         if ((totalram_pages() << (PAGE_SHIFT - 10)) +
1380             (adev->gmc.real_vram_size / 1024) >= 64000000) {
1381                 DRM_WARN("Disabling S/G due to >=64GB RAM\n");
1382                 return false;
1383         }
1384         return true;
1385 }
1386
1387 /*
1388  * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
1389  * speed switching. Until we have confirmation from Intel that a specific host
1390  * supports it, it's safer that we keep it disabled for all.
1391  *
1392  * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
1393  * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
1394  */
1395 bool amdgpu_device_pcie_dynamic_switching_supported(void)
1396 {
1397 #if IS_ENABLED(CONFIG_X86)
1398         struct cpuinfo_x86 *c = &cpu_data(0);
1399
1400         if (c->x86_vendor == X86_VENDOR_INTEL)
1401                 return false;
1402 #endif
1403         return true;
1404 }
1405
1406 /**
1407  * amdgpu_device_should_use_aspm - check if the device should program ASPM
1408  *
1409  * @adev: amdgpu_device pointer
1410  *
1411  * Confirm whether the module parameter and pcie bridge agree that ASPM should
1412  * be set for this device.
1413  *
1414  * Returns true if it should be used or false if not.
1415  */
1416 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1417 {
1418         switch (amdgpu_aspm) {
1419         case -1:
1420                 break;
1421         case 0:
1422                 return false;
1423         case 1:
1424                 return true;
1425         default:
1426                 return false;
1427         }
1428         return pcie_aspm_enabled(adev->pdev);
1429 }
1430
1431 bool amdgpu_device_aspm_support_quirk(void)
1432 {
1433 #if IS_ENABLED(CONFIG_X86)
1434         struct cpuinfo_x86 *c = &cpu_data(0);
1435
1436         return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
1437 #else
1438         return true;
1439 #endif
1440 }
1441
1442 /* if we get transitioned to only one device, take VGA back */
1443 /**
1444  * amdgpu_device_vga_set_decode - enable/disable vga decode
1445  *
1446  * @pdev: PCI device pointer
1447  * @state: enable/disable vga decode
1448  *
1449  * Enable/disable vga decode (all asics).
1450  * Returns VGA resource flags.
1451  */
1452 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1453                 bool state)
1454 {
1455         struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1456
1457         amdgpu_asic_set_vga_state(adev, state);
1458         if (state)
1459                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1460                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1461         else
1462                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1463 }
1464
1465 /**
1466  * amdgpu_device_check_block_size - validate the vm block size
1467  *
1468  * @adev: amdgpu_device pointer
1469  *
1470  * Validates the vm block size specified via module parameter.
1471  * The vm block size defines the number of bits in the page table versus the
1472  * page directory. A page is 4KB, so we have a 12 bit offset, a minimum of 9 bits
1473  * in the page table, and the remaining bits in the page directory.
1474  */
1475 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1476 {
1477         /* defines number of bits in page table versus page directory,
1478          * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1479          * page table and the remaining bits are in the page directory
1480          */
1481         if (amdgpu_vm_block_size == -1)
1482                 return;
1483
1484         if (amdgpu_vm_block_size < 9) {
1485                 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1486                          amdgpu_vm_block_size);
1487                 amdgpu_vm_block_size = -1;
1488         }
1489 }
1490
1491 /**
1492  * amdgpu_device_check_vm_size - validate the vm size
1493  *
1494  * @adev: amdgpu_device pointer
1495  *
1496  * Validates the vm size in GB specified via module parameter.
1497  * The VM size is the size of the GPU virtual memory space in GB.
1498  */
1499 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1500 {
1501         /* no need to check the default value */
1502         if (amdgpu_vm_size == -1)
1503                 return;
1504
1505         if (amdgpu_vm_size < 1) {
1506                 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1507                          amdgpu_vm_size);
1508                 amdgpu_vm_size = -1;
1509         }
1510 }
1511
1512 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1513 {
1514         struct sysinfo si;
1515         bool is_os_64 = (sizeof(void *) == 8);
1516         uint64_t total_memory;
1517         uint64_t dram_size_seven_GB = 0x1B8000000;
1518         uint64_t dram_size_three_GB = 0xB8000000;
1519
1520         if (amdgpu_smu_memory_pool_size == 0)
1521                 return;
1522
1523         if (!is_os_64) {
1524                 DRM_WARN("Not 64-bit OS, feature not supported\n");
1525                 goto def_value;
1526         }
1527         si_meminfo(&si);
1528         total_memory = (uint64_t)si.totalram * si.mem_unit;
1529
1530         if ((amdgpu_smu_memory_pool_size == 1) ||
1531                 (amdgpu_smu_memory_pool_size == 2)) {
1532                 if (total_memory < dram_size_three_GB)
1533                         goto def_value1;
1534         } else if ((amdgpu_smu_memory_pool_size == 4) ||
1535                 (amdgpu_smu_memory_pool_size == 8)) {
1536                 if (total_memory < dram_size_seven_GB)
1537                         goto def_value1;
1538         } else {
1539                 DRM_WARN("Smu memory pool size not supported\n");
1540                 goto def_value;
1541         }
1542         adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1543
1544         return;
1545
1546 def_value1:
1547         DRM_WARN("Not enough system memory\n");
1548 def_value:
1549         adev->pm.smu_prv_buffer_size = 0;
1550 }
1551
1552 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1553 {
1554         if (!(adev->flags & AMD_IS_APU) ||
1555             adev->asic_type < CHIP_RAVEN)
1556                 return 0;
1557
1558         switch (adev->asic_type) {
1559         case CHIP_RAVEN:
1560                 if (adev->pdev->device == 0x15dd)
1561                         adev->apu_flags |= AMD_APU_IS_RAVEN;
1562                 if (adev->pdev->device == 0x15d8)
1563                         adev->apu_flags |= AMD_APU_IS_PICASSO;
1564                 break;
1565         case CHIP_RENOIR:
1566                 if ((adev->pdev->device == 0x1636) ||
1567                     (adev->pdev->device == 0x164c))
1568                         adev->apu_flags |= AMD_APU_IS_RENOIR;
1569                 else
1570                         adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1571                 break;
1572         case CHIP_VANGOGH:
1573                 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1574                 break;
1575         case CHIP_YELLOW_CARP:
1576                 break;
1577         case CHIP_CYAN_SKILLFISH:
1578                 if ((adev->pdev->device == 0x13FE) ||
1579                     (adev->pdev->device == 0x143F))
1580                         adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1581                 break;
1582         default:
1583                 break;
1584         }
1585
1586         return 0;
1587 }
1588
1589 /**
1590  * amdgpu_device_check_arguments - validate module params
1591  *
1592  * @adev: amdgpu_device pointer
1593  *
1594  * Validates certain module parameters and updates
1595  * the associated values used by the driver (all asics).
1596  */
1597 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1598 {
1599         if (amdgpu_sched_jobs < 4) {
1600                 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1601                          amdgpu_sched_jobs);
1602                 amdgpu_sched_jobs = 4;
1603         } else if (!is_power_of_2(amdgpu_sched_jobs)) {
1604                 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1605                          amdgpu_sched_jobs);
1606                 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1607         }
1608
1609         if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1610                 /* gart size must be greater than or equal to 32M */
1611                 dev_warn(adev->dev, "gart size (%d) too small\n",
1612                          amdgpu_gart_size);
1613                 amdgpu_gart_size = -1;
1614         }
1615
1616         if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1617                 /* gtt size must be greater than or equal to 32M */
1618                 dev_warn(adev->dev, "gtt size (%d) too small\n",
1619                                  amdgpu_gtt_size);
1620                 amdgpu_gtt_size = -1;
1621         }
1622
1623         /* valid range is between 4 and 9 inclusive */
1624         if (amdgpu_vm_fragment_size != -1 &&
1625             (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1626                 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1627                 amdgpu_vm_fragment_size = -1;
1628         }
1629
1630         if (amdgpu_sched_hw_submission < 2) {
1631                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1632                          amdgpu_sched_hw_submission);
1633                 amdgpu_sched_hw_submission = 2;
1634         } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1635                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1636                          amdgpu_sched_hw_submission);
1637                 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1638         }
1639
1640         if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1641                 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1642                 amdgpu_reset_method = -1;
1643         }
1644
1645         amdgpu_device_check_smu_prv_buffer_size(adev);
1646
1647         amdgpu_device_check_vm_size(adev);
1648
1649         amdgpu_device_check_block_size(adev);
1650
1651         adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1652
1653         return 0;
1654 }
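/*
 * Example (illustrative sketch, not from the upstream source): how the checks
 * above sanitize module parameters.  The values shown are hypothetical:
 *
 *   amdgpu_sched_jobs = 3;          // below the minimum, forced up to 4
 *   amdgpu_sched_jobs = 5;          // not a power of two, rounded up to 8
 *   amdgpu_gart_size  = 16;         // below 32M, reset to -1 (auto)
 *   amdgpu_vm_fragment_size = 12;   // outside 4..9, reset to -1 (default)
 */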
1655
1656 /**
1657  * amdgpu_switcheroo_set_state - set switcheroo state
1658  *
1659  * @pdev: pci dev pointer
1660  * @state: vga_switcheroo state
1661  *
1662  * Callback for the switcheroo driver.  Suspends or resumes
1663  * the asic before or after it is powered up using ACPI methods.
1664  */
1665 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1666                                         enum vga_switcheroo_state state)
1667 {
1668         struct drm_device *dev = pci_get_drvdata(pdev);
1669         int r;
1670
1671         if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1672                 return;
1673
1674         if (state == VGA_SWITCHEROO_ON) {
1675                 pr_info("switched on\n");
1676                 /* don't suspend or resume card normally */
1677                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1678
1679                 pci_set_power_state(pdev, PCI_D0);
1680                 amdgpu_device_load_pci_state(pdev);
1681                 r = pci_enable_device(pdev);
1682                 if (r)
1683                         DRM_WARN("pci_enable_device failed (%d)\n", r);
1684                 amdgpu_device_resume(dev, true);
1685
1686                 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1687         } else {
1688                 pr_info("switched off\n");
1689                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1690                 amdgpu_device_suspend(dev, true);
1691                 amdgpu_device_cache_pci_state(pdev);
1692                 /* Shut down the device */
1693                 pci_disable_device(pdev);
1694                 pci_set_power_state(pdev, PCI_D3cold);
1695                 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1696         }
1697 }
1698
1699 /**
1700  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1701  *
1702  * @pdev: pci dev pointer
1703  *
1704  * Callback for the switcheroo driver.  Checks if the switcheroo
1705  * state can be changed.
1706  * Returns true if the state can be changed, false if not.
1707  */
1708 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1709 {
1710         struct drm_device *dev = pci_get_drvdata(pdev);
1711
1712        /*
1713         * FIXME: open_count is protected by drm_global_mutex but that would lead to
1714         * locking inversion with the driver load path. And the access here is
1715         * completely racy anyway. So don't bother with locking for now.
1716         */
1717         return atomic_read(&dev->open_count) == 0;
1718 }
1719
1720 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1721         .set_gpu_state = amdgpu_switcheroo_set_state,
1722         .reprobe = NULL,
1723         .can_switch = amdgpu_switcheroo_can_switch,
1724 };
1725
1726 /**
1727  * amdgpu_device_ip_set_clockgating_state - set the CG state
1728  *
1729  * @dev: amdgpu_device pointer
1730  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1731  * @state: clockgating state (gate or ungate)
1732  *
1733  * Sets the requested clockgating state for all instances of
1734  * the hardware IP specified.
1735  * Returns the error code from the last instance.
1736  */
1737 int amdgpu_device_ip_set_clockgating_state(void *dev,
1738                                            enum amd_ip_block_type block_type,
1739                                            enum amd_clockgating_state state)
1740 {
1741         struct amdgpu_device *adev = dev;
1742         int i, r = 0;
1743
1744         for (i = 0; i < adev->num_ip_blocks; i++) {
1745                 if (!adev->ip_blocks[i].status.valid)
1746                         continue;
1747                 if (adev->ip_blocks[i].version->type != block_type)
1748                         continue;
1749                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1750                         continue;
1751                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1752                         (void *)adev, state);
1753                 if (r)
1754                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1755                                   adev->ip_blocks[i].version->funcs->name, r);
1756         }
1757         return r;
1758 }
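/*
 * Example (illustrative sketch, not from the upstream source): a caller that
 * wants to gate clocks for every GFX IP instance could use this helper as:
 *
 *   r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *                                              AMD_CG_STATE_GATE);
 *
 * Note that only the error code of the last matching instance is returned.
 */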
1759
1760 /**
1761  * amdgpu_device_ip_set_powergating_state - set the PG state
1762  *
1763  * @dev: amdgpu_device pointer
1764  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1765  * @state: powergating state (gate or ungate)
1766  *
1767  * Sets the requested powergating state for all instances of
1768  * the hardware IP specified.
1769  * Returns the error code from the last instance.
1770  */
1771 int amdgpu_device_ip_set_powergating_state(void *dev,
1772                                            enum amd_ip_block_type block_type,
1773                                            enum amd_powergating_state state)
1774 {
1775         struct amdgpu_device *adev = dev;
1776         int i, r = 0;
1777
1778         for (i = 0; i < adev->num_ip_blocks; i++) {
1779                 if (!adev->ip_blocks[i].status.valid)
1780                         continue;
1781                 if (adev->ip_blocks[i].version->type != block_type)
1782                         continue;
1783                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1784                         continue;
1785                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1786                         (void *)adev, state);
1787                 if (r)
1788                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1789                                   adev->ip_blocks[i].version->funcs->name, r);
1790         }
1791         return r;
1792 }
1793
1794 /**
1795  * amdgpu_device_ip_get_clockgating_state - get the CG state
1796  *
1797  * @adev: amdgpu_device pointer
1798  * @flags: clockgating feature flags
1799  *
1800  * Walks the list of IPs on the device and updates the clockgating
1801  * flags for each IP.
1802  * Updates @flags with the feature flags for each hardware IP where
1803  * clockgating is enabled.
1804  */
1805 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1806                                             u64 *flags)
1807 {
1808         int i;
1809
1810         for (i = 0; i < adev->num_ip_blocks; i++) {
1811                 if (!adev->ip_blocks[i].status.valid)
1812                         continue;
1813                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1814                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1815         }
1816 }
1817
1818 /**
1819  * amdgpu_device_ip_wait_for_idle - wait for idle
1820  *
1821  * @adev: amdgpu_device pointer
1822  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1823  *
1824  * Waits for the requested hardware IP to be idle.
1825  * Returns 0 for success or a negative error code on failure.
1826  */
1827 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1828                                    enum amd_ip_block_type block_type)
1829 {
1830         int i, r;
1831
1832         for (i = 0; i < adev->num_ip_blocks; i++) {
1833                 if (!adev->ip_blocks[i].status.valid)
1834                         continue;
1835                 if (adev->ip_blocks[i].version->type == block_type) {
1836                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1837                         if (r)
1838                                 return r;
1839                         break;
1840                 }
1841         }
1842         return 0;
1843
1844 }
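/*
 * Example (illustrative sketch, not from the upstream source): waiting for a
 * block to go idle before touching it; the block type here is only an example:
 *
 *   r = amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GFX);
 *   if (r)
 *           return r;
 */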
1845
1846 /**
1847  * amdgpu_device_ip_is_idle - is the hardware IP idle
1848  *
1849  * @adev: amdgpu_device pointer
1850  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1851  *
1852  * Check if the hardware IP is idle or not.
1853  * Returns true if the IP is idle, false if not.
1854  */
1855 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1856                               enum amd_ip_block_type block_type)
1857 {
1858         int i;
1859
1860         for (i = 0; i < adev->num_ip_blocks; i++) {
1861                 if (!adev->ip_blocks[i].status.valid)
1862                         continue;
1863                 if (adev->ip_blocks[i].version->type == block_type)
1864                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1865         }
1866         return true;
1867
1868 }
1869
1870 /**
1871  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1872  *
1873  * @adev: amdgpu_device pointer
1874  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1875  *
1876  * Returns a pointer to the hardware IP block structure
1877  * if it exists for the asic, otherwise NULL.
1878  */
1879 struct amdgpu_ip_block *
1880 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1881                               enum amd_ip_block_type type)
1882 {
1883         int i;
1884
1885         for (i = 0; i < adev->num_ip_blocks; i++)
1886                 if (adev->ip_blocks[i].version->type == type)
1887                         return &adev->ip_blocks[i];
1888
1889         return NULL;
1890 }
1891
1892 /**
1893  * amdgpu_device_ip_block_version_cmp
1894  *
1895  * @adev: amdgpu_device pointer
1896  * @type: enum amd_ip_block_type
1897  * @major: major version
1898  * @minor: minor version
1899  *
1900  * Return 0 if the IP block version is equal to or greater than the
1901  * requested version, 1 if it is smaller or the ip_block doesn't exist.
1902  */
1903 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1904                                        enum amd_ip_block_type type,
1905                                        u32 major, u32 minor)
1906 {
1907         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1908
1909         if (ip_block && ((ip_block->version->major > major) ||
1910                         ((ip_block->version->major == major) &&
1911                         (ip_block->version->minor >= minor))))
1912                 return 0;
1913
1914         return 1;
1915 }
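/*
 * Example (illustrative sketch, not from the upstream source): looking up an
 * IP block and gating a feature on a minimum version using the two helpers
 * above.  The block type and version numbers are only examples:
 *
 *   struct amdgpu_ip_block *ip =
 *           amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
 *   if (ip && amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *                                                9, 0) == 0) {
 *           // GFX IP block exists and is at least version 9.0
 *   }
 */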
1916
1917 /**
1918  * amdgpu_device_ip_block_add
1919  *
1920  * @adev: amdgpu_device pointer
1921  * @ip_block_version: pointer to the IP to add
1922  *
1923  * Adds the IP block driver information to the collection of IPs
1924  * on the asic.
1925  */
1926 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1927                                const struct amdgpu_ip_block_version *ip_block_version)
1928 {
1929         if (!ip_block_version)
1930                 return -EINVAL;
1931
1932         switch (ip_block_version->type) {
1933         case AMD_IP_BLOCK_TYPE_VCN:
1934                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1935                         return 0;
1936                 break;
1937         case AMD_IP_BLOCK_TYPE_JPEG:
1938                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1939                         return 0;
1940                 break;
1941         default:
1942                 break;
1943         }
1944
1945         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1946                   ip_block_version->funcs->name);
1947
1948         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1949
1950         return 0;
1951 }
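/*
 * Example (illustrative sketch, not from the upstream source): the per-ASIC
 * *_set_ip_blocks() functions call this once per IP, in initialization order,
 * with a pointer to that IP's amdgpu_ip_block_version.  The symbol below is
 * only a placeholder:
 *
 *   r = amdgpu_device_ip_block_add(adev, &some_common_ip_block);
 */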
1952
1953 /**
1954  * amdgpu_device_enable_virtual_display - enable virtual display feature
1955  *
1956  * @adev: amdgpu_device pointer
1957  *
1958  * Enables the virtual display feature if the user has enabled it via
1959  * the module parameter virtual_display.  This feature provides a virtual
1960  * display hardware on headless boards or in virtualized environments.
1961  * This function parses and validates the configuration string specified by
1962  * the user and configures the virtual display configuration (number of
1963  * virtual connectors, crtcs, etc.) specified.
1964  */
1965 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1966 {
1967         adev->enable_virtual_display = false;
1968
1969         if (amdgpu_virtual_display) {
1970                 const char *pci_address_name = pci_name(adev->pdev);
1971                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1972
1973                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1974                 pciaddstr_tmp = pciaddstr;
1975                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1976                         pciaddname = strsep(&pciaddname_tmp, ",");
1977                         if (!strcmp("all", pciaddname)
1978                             || !strcmp(pci_address_name, pciaddname)) {
1979                                 long num_crtc;
1980                                 int res = -1;
1981
1982                                 adev->enable_virtual_display = true;
1983
1984                                 if (pciaddname_tmp)
1985                                         res = kstrtol(pciaddname_tmp, 10,
1986                                                       &num_crtc);
1987
1988                                 if (!res) {
1989                                         if (num_crtc < 1)
1990                                                 num_crtc = 1;
1991                                         if (num_crtc > 6)
1992                                                 num_crtc = 6;
1993                                         adev->mode_info.num_crtc = num_crtc;
1994                                 } else {
1995                                         adev->mode_info.num_crtc = 1;
1996                                 }
1997                                 break;
1998                         }
1999                 }
2000
2001                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
2002                          amdgpu_virtual_display, pci_address_name,
2003                          adev->enable_virtual_display, adev->mode_info.num_crtc);
2004
2005                 kfree(pciaddstr);
2006         }
2007 }
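/*
 * Example (illustrative note, not from the upstream source): as parsed above,
 * the virtual_display string is a ';'-separated list of entries of the form
 * "<pci address>[,<num crtcs>]", where the address may be "all" and the crtc
 * count is clamped to the range 1..6.  A hypothetical setting such as
 *
 *   amdgpu.virtual_display=0000:01:00.0,2;0000:02:00.0
 *
 * would give the first device two virtual crtcs and the second the default of
 * one, assuming those PCI addresses match real devices.
 */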
2008
2009 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
2010 {
2011         if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
2012                 adev->mode_info.num_crtc = 1;
2013                 adev->enable_virtual_display = true;
2014                 DRM_INFO("virtual_display:%d, num_crtc:%d\n",
2015                          adev->enable_virtual_display, adev->mode_info.num_crtc);
2016         }
2017 }
2018
2019 /**
2020  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
2021  *
2022  * @adev: amdgpu_device pointer
2023  *
2024  * Parses the asic configuration parameters specified in the gpu info
2025  * firmware and makes them available to the driver for use in configuring
2026  * the asic.
2027  * Returns 0 on success, -EINVAL on failure.
2028  */
2029 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
2030 {
2031         const char *chip_name;
2032         char fw_name[40];
2033         int err;
2034         const struct gpu_info_firmware_header_v1_0 *hdr;
2035
2036         adev->firmware.gpu_info_fw = NULL;
2037
2038         if (adev->mman.discovery_bin) {
2039                 /*
2040                  * FIXME: The bounding box is still needed by Navi12, so
2041                  * temporarily read it from gpu_info firmware. Should be dropped
2042                  * when DAL no longer needs it.
2043                  */
2044                 if (adev->asic_type != CHIP_NAVI12)
2045                         return 0;
2046         }
2047
2048         switch (adev->asic_type) {
2049         default:
2050                 return 0;
2051         case CHIP_VEGA10:
2052                 chip_name = "vega10";
2053                 break;
2054         case CHIP_VEGA12:
2055                 chip_name = "vega12";
2056                 break;
2057         case CHIP_RAVEN:
2058                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2059                         chip_name = "raven2";
2060                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
2061                         chip_name = "picasso";
2062                 else
2063                         chip_name = "raven";
2064                 break;
2065         case CHIP_ARCTURUS:
2066                 chip_name = "arcturus";
2067                 break;
2068         case CHIP_NAVI12:
2069                 chip_name = "navi12";
2070                 break;
2071         }
2072
2073         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
2074         err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
2075         if (err) {
2076                 dev_err(adev->dev,
2077                         "Failed to get gpu_info firmware \"%s\"\n",
2078                         fw_name);
2079                 goto out;
2080         }
2081
2082         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2083         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2084
2085         switch (hdr->version_major) {
2086         case 1:
2087         {
2088                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2089                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2090                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2091
2092                 /*
2093                  * Should be dropped when DAL no longer needs it.
2094                  */
2095                 if (adev->asic_type == CHIP_NAVI12)
2096                         goto parse_soc_bounding_box;
2097
2098                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2099                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2100                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2101                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2102                 adev->gfx.config.max_texture_channel_caches =
2103                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
2104                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2105                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2106                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2107                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2108                 adev->gfx.config.double_offchip_lds_buf =
2109                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2110                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2111                 adev->gfx.cu_info.max_waves_per_simd =
2112                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2113                 adev->gfx.cu_info.max_scratch_slots_per_cu =
2114                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2115                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2116                 if (hdr->version_minor >= 1) {
2117                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2118                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2119                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2120                         adev->gfx.config.num_sc_per_sh =
2121                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2122                         adev->gfx.config.num_packer_per_sc =
2123                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2124                 }
2125
2126 parse_soc_bounding_box:
2127                 /*
2128                  * soc bounding box info is not integrated in the discovery table,
2129                  * so we always need to parse it from the gpu info firmware if needed.
2130                  */
2131                 if (hdr->version_minor == 2) {
2132                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2133                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2134                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2135                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2136                 }
2137                 break;
2138         }
2139         default:
2140                 dev_err(adev->dev,
2141                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2142                 err = -EINVAL;
2143                 goto out;
2144         }
2145 out:
2146         return err;
2147 }
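/*
 * Example (illustrative note, not from the upstream source): the firmware file
 * name built above follows the pattern "amdgpu/<chip>_gpu_info.bin", e.g.
 * "amdgpu/navi12_gpu_info.bin" for CHIP_NAVI12.  For ASICs carrying an IP
 * discovery table, only the Navi12 SOC bounding box is still read from this
 * firmware.
 */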
2148
2149 /**
2150  * amdgpu_device_ip_early_init - run early init for hardware IPs
2151  *
2152  * @adev: amdgpu_device pointer
2153  *
2154  * Early initialization pass for hardware IPs.  The hardware IPs that make
2155  * up each asic are discovered and each IP's early_init callback is run.  This
2156  * is the first stage in initializing the asic.
2157  * Returns 0 on success, negative error code on failure.
2158  */
2159 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2160 {
2161         struct drm_device *dev = adev_to_drm(adev);
2162         struct pci_dev *parent;
2163         int i, r;
2164         bool total;
2165
2166         amdgpu_device_enable_virtual_display(adev);
2167
2168         if (amdgpu_sriov_vf(adev)) {
2169                 r = amdgpu_virt_request_full_gpu(adev, true);
2170                 if (r)
2171                         return r;
2172         }
2173
2174         switch (adev->asic_type) {
2175 #ifdef CONFIG_DRM_AMDGPU_SI
2176         case CHIP_VERDE:
2177         case CHIP_TAHITI:
2178         case CHIP_PITCAIRN:
2179         case CHIP_OLAND:
2180         case CHIP_HAINAN:
2181                 adev->family = AMDGPU_FAMILY_SI;
2182                 r = si_set_ip_blocks(adev);
2183                 if (r)
2184                         return r;
2185                 break;
2186 #endif
2187 #ifdef CONFIG_DRM_AMDGPU_CIK
2188         case CHIP_BONAIRE:
2189         case CHIP_HAWAII:
2190         case CHIP_KAVERI:
2191         case CHIP_KABINI:
2192         case CHIP_MULLINS:
2193                 if (adev->flags & AMD_IS_APU)
2194                         adev->family = AMDGPU_FAMILY_KV;
2195                 else
2196                         adev->family = AMDGPU_FAMILY_CI;
2197
2198                 r = cik_set_ip_blocks(adev);
2199                 if (r)
2200                         return r;
2201                 break;
2202 #endif
2203         case CHIP_TOPAZ:
2204         case CHIP_TONGA:
2205         case CHIP_FIJI:
2206         case CHIP_POLARIS10:
2207         case CHIP_POLARIS11:
2208         case CHIP_POLARIS12:
2209         case CHIP_VEGAM:
2210         case CHIP_CARRIZO:
2211         case CHIP_STONEY:
2212                 if (adev->flags & AMD_IS_APU)
2213                         adev->family = AMDGPU_FAMILY_CZ;
2214                 else
2215                         adev->family = AMDGPU_FAMILY_VI;
2216
2217                 r = vi_set_ip_blocks(adev);
2218                 if (r)
2219                         return r;
2220                 break;
2221         default:
2222                 r = amdgpu_discovery_set_ip_blocks(adev);
2223                 if (r)
2224                         return r;
2225                 break;
2226         }
2227
2228         if (amdgpu_has_atpx() &&
2229             (amdgpu_is_atpx_hybrid() ||
2230              amdgpu_has_atpx_dgpu_power_cntl()) &&
2231             ((adev->flags & AMD_IS_APU) == 0) &&
2232             !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2233                 adev->flags |= AMD_IS_PX;
2234
2235         if (!(adev->flags & AMD_IS_APU)) {
2236                 parent = pci_upstream_bridge(adev->pdev);
2237                 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2238         }
2239
2240
2241         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2242         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2243                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2244         if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2245                 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2246
2247         total = true;
2248         for (i = 0; i < adev->num_ip_blocks; i++) {
2249                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2250                         DRM_WARN("disabled ip block: %d <%s>\n",
2251                                   i, adev->ip_blocks[i].version->funcs->name);
2252                         adev->ip_blocks[i].status.valid = false;
2253                 } else {
2254                         if (adev->ip_blocks[i].version->funcs->early_init) {
2255                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2256                                 if (r == -ENOENT) {
2257                                         adev->ip_blocks[i].status.valid = false;
2258                                 } else if (r) {
2259                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2260                                                   adev->ip_blocks[i].version->funcs->name, r);
2261                                         total = false;
2262                                 } else {
2263                                         adev->ip_blocks[i].status.valid = true;
2264                                 }
2265                         } else {
2266                                 adev->ip_blocks[i].status.valid = true;
2267                         }
2268                 }
2269                 /* get the vbios after the asic_funcs are set up */
2270                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2271                         r = amdgpu_device_parse_gpu_info_fw(adev);
2272                         if (r)
2273                                 return r;
2274
2275                         /* Read BIOS */
2276                         if (amdgpu_device_read_bios(adev)) {
2277                                 if (!amdgpu_get_bios(adev))
2278                                         return -EINVAL;
2279
2280                                 r = amdgpu_atombios_init(adev);
2281                                 if (r) {
2282                                         dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2283                                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2284                                         return r;
2285                                 }
2286                         }
2287
2288                         /* get pf2vf msg info at its earliest time */
2289                         if (amdgpu_sriov_vf(adev))
2290                                 amdgpu_virt_init_data_exchange(adev);
2291
2292                 }
2293         }
2294         if (!total)
2295                 return -ENODEV;
2296
2297         amdgpu_amdkfd_device_probe(adev);
2298         adev->cg_flags &= amdgpu_cg_mask;
2299         adev->pg_flags &= amdgpu_pg_mask;
2300
2301         return 0;
2302 }
2303
2304 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2305 {
2306         int i, r;
2307
2308         for (i = 0; i < adev->num_ip_blocks; i++) {
2309                 if (!adev->ip_blocks[i].status.sw)
2310                         continue;
2311                 if (adev->ip_blocks[i].status.hw)
2312                         continue;
2313                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2314                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2315                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2316                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2317                         if (r) {
2318                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2319                                           adev->ip_blocks[i].version->funcs->name, r);
2320                                 return r;
2321                         }
2322                         adev->ip_blocks[i].status.hw = true;
2323                 }
2324         }
2325
2326         return 0;
2327 }
2328
2329 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2330 {
2331         int i, r;
2332
2333         for (i = 0; i < adev->num_ip_blocks; i++) {
2334                 if (!adev->ip_blocks[i].status.sw)
2335                         continue;
2336                 if (adev->ip_blocks[i].status.hw)
2337                         continue;
2338                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2339                 if (r) {
2340                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2341                                   adev->ip_blocks[i].version->funcs->name, r);
2342                         return r;
2343                 }
2344                 adev->ip_blocks[i].status.hw = true;
2345         }
2346
2347         return 0;
2348 }
2349
2350 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2351 {
2352         int r = 0;
2353         int i;
2354         uint32_t smu_version;
2355
2356         if (adev->asic_type >= CHIP_VEGA10) {
2357                 for (i = 0; i < adev->num_ip_blocks; i++) {
2358                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2359                                 continue;
2360
2361                         if (!adev->ip_blocks[i].status.sw)
2362                                 continue;
2363
2364                         /* no need to do the fw loading again if already done */
2365                         if (adev->ip_blocks[i].status.hw == true)
2366                                 break;
2367
2368                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2369                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2370                                 if (r) {
2371                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2372                                                           adev->ip_blocks[i].version->funcs->name, r);
2373                                         return r;
2374                                 }
2375                         } else {
2376                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2377                                 if (r) {
2378                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2379                                                           adev->ip_blocks[i].version->funcs->name, r);
2380                                         return r;
2381                                 }
2382                         }
2383
2384                         adev->ip_blocks[i].status.hw = true;
2385                         break;
2386                 }
2387         }
2388
2389         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2390                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2391
2392         return r;
2393 }
2394
2395 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2396 {
2397         long timeout;
2398         int r, i;
2399
2400         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2401                 struct amdgpu_ring *ring = adev->rings[i];
2402
2403                 /* No need to set up the GPU scheduler for rings that don't need it */
2404                 if (!ring || ring->no_scheduler)
2405                         continue;
2406
2407                 switch (ring->funcs->type) {
2408                 case AMDGPU_RING_TYPE_GFX:
2409                         timeout = adev->gfx_timeout;
2410                         break;
2411                 case AMDGPU_RING_TYPE_COMPUTE:
2412                         timeout = adev->compute_timeout;
2413                         break;
2414                 case AMDGPU_RING_TYPE_SDMA:
2415                         timeout = adev->sdma_timeout;
2416                         break;
2417                 default:
2418                         timeout = adev->video_timeout;
2419                         break;
2420                 }
2421
2422                 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2423                                    ring->num_hw_submission, 0,
2424                                    timeout, adev->reset_domain->wq,
2425                                    ring->sched_score, ring->name,
2426                                    adev->dev);
2427                 if (r) {
2428                         DRM_ERROR("Failed to create scheduler on ring %s.\n",
2429                                   ring->name);
2430                         return r;
2431                 }
2432         }
2433
2434         amdgpu_xcp_update_partition_sched_list(adev);
2435
2436         return 0;
2437 }
2438
2439
2440 /**
2441  * amdgpu_device_ip_init - run init for hardware IPs
2442  *
2443  * @adev: amdgpu_device pointer
2444  *
2445  * Main initialization pass for hardware IPs.  The list of all the hardware
2446  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2447  * are run.  sw_init initializes the software state associated with each IP
2448  * and hw_init initializes the hardware associated with each IP.
2449  * Returns 0 on success, negative error code on failure.
2450  */
2451 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2452 {
2453         int i, r;
2454
2455         r = amdgpu_ras_init(adev);
2456         if (r)
2457                 return r;
2458
2459         for (i = 0; i < adev->num_ip_blocks; i++) {
2460                 if (!adev->ip_blocks[i].status.valid)
2461                         continue;
2462                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2463                 if (r) {
2464                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2465                                   adev->ip_blocks[i].version->funcs->name, r);
2466                         goto init_failed;
2467                 }
2468                 adev->ip_blocks[i].status.sw = true;
2469
2470                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2471                         /* need to do common hw init early so everything is set up for gmc */
2472                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2473                         if (r) {
2474                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2475                                 goto init_failed;
2476                         }
2477                         adev->ip_blocks[i].status.hw = true;
2478                 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2479                         /* need to do gmc hw init early so we can allocate gpu mem */
2480                         /* Try to reserve bad pages early */
2481                         if (amdgpu_sriov_vf(adev))
2482                                 amdgpu_virt_exchange_data(adev);
2483
2484                         r = amdgpu_device_mem_scratch_init(adev);
2485                         if (r) {
2486                                 DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
2487                                 goto init_failed;
2488                         }
2489                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2490                         if (r) {
2491                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2492                                 goto init_failed;
2493                         }
2494                         r = amdgpu_device_wb_init(adev);
2495                         if (r) {
2496                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2497                                 goto init_failed;
2498                         }
2499                         adev->ip_blocks[i].status.hw = true;
2500
2501                         /* right after GMC hw init, we create CSA */
2502                         if (adev->gfx.mcbp) {
2503                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2504                                                                AMDGPU_GEM_DOMAIN_VRAM |
2505                                                                AMDGPU_GEM_DOMAIN_GTT,
2506                                                                AMDGPU_CSA_SIZE);
2507                                 if (r) {
2508                                         DRM_ERROR("allocate CSA failed %d\n", r);
2509                                         goto init_failed;
2510                                 }
2511                         }
2512                 }
2513         }
2514
2515         if (amdgpu_sriov_vf(adev))
2516                 amdgpu_virt_init_data_exchange(adev);
2517
2518         r = amdgpu_ib_pool_init(adev);
2519         if (r) {
2520                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2521                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2522                 goto init_failed;
2523         }
2524
2525         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2526         if (r)
2527                 goto init_failed;
2528
2529         r = amdgpu_device_ip_hw_init_phase1(adev);
2530         if (r)
2531                 goto init_failed;
2532
2533         r = amdgpu_device_fw_loading(adev);
2534         if (r)
2535                 goto init_failed;
2536
2537         r = amdgpu_device_ip_hw_init_phase2(adev);
2538         if (r)
2539                 goto init_failed;
2540
2541         /*
2542          * Retired pages will be loaded from eeprom and reserved here.
2543          * This should be called after amdgpu_device_ip_hw_init_phase2 since,
2544          * for some ASICs, the RAS EEPROM code relies on the SMU being fully
2545          * functional for I2C communication, which is only true at this point.
2546          *
2547          * amdgpu_ras_recovery_init may fail, but the upper layers only care
2548          * about failures caused by a bad gpu situation and stop the amdgpu
2549          * init process accordingly.  For other failures it still releases all
2550          * the resources and prints an error message, rather than returning a
2551          * negative value to the upper level.
2552          *
2553          * Note: theoretically, this should be called before all vram allocations
2554          * to protect retired pages from being abused.
2555          */
2556         r = amdgpu_ras_recovery_init(adev);
2557         if (r)
2558                 goto init_failed;
2559
2560         /*
2561          * In case of XGMI, grab an extra reference to the reset domain for this device
2562          */
2563         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2564                 if (amdgpu_xgmi_add_device(adev) == 0) {
2565                         if (!amdgpu_sriov_vf(adev)) {
2566                                 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2567
2568                                 if (WARN_ON(!hive)) {
2569                                         r = -ENOENT;
2570                                         goto init_failed;
2571                                 }
2572
2573                                 if (!hive->reset_domain ||
2574                                     !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2575                                         r = -ENOENT;
2576                                         amdgpu_put_xgmi_hive(hive);
2577                                         goto init_failed;
2578                                 }
2579
2580                                 /* Drop the early temporary reset domain we created for device */
2581                                 amdgpu_reset_put_reset_domain(adev->reset_domain);
2582                                 adev->reset_domain = hive->reset_domain;
2583                                 amdgpu_put_xgmi_hive(hive);
2584                         }
2585                 }
2586         }
2587
2588         r = amdgpu_device_init_schedulers(adev);
2589         if (r)
2590                 goto init_failed;
2591
2592         /* Don't init kfd if the whole hive needs to be reset during init */
2593         if (!adev->gmc.xgmi.pending_reset) {
2594                 kgd2kfd_init_zone_device(adev);
2595                 amdgpu_amdkfd_device_init(adev);
2596         }
2597
2598         amdgpu_fru_get_product_info(adev);
2599
2600 init_failed:
2601
2602         return r;
2603 }
2604
2605 /**
2606  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2607  *
2608  * @adev: amdgpu_device pointer
2609  *
2610  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2611  * this function before a GPU reset.  If the value is retained after a
2612  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2613  */
2614 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2615 {
2616         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2617 }
2618
2619 /**
2620  * amdgpu_device_check_vram_lost - check if vram is valid
2621  *
2622  * @adev: amdgpu_device pointer
2623  *
2624  * Checks the reset magic value written to the gart pointer in VRAM.
2625  * The driver calls this after a GPU reset to see if the contents of
2626  * VRAM are lost or not.
2627  * returns true if vram is lost, false if not.
2628  */
2629 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2630 {
2631         if (memcmp(adev->gart.ptr, adev->reset_magic,
2632                         AMDGPU_RESET_MAGIC_NUM))
2633                 return true;
2634
2635         if (!amdgpu_in_reset(adev))
2636                 return false;
2637
2638         /*
2639          * For all ASICs with baco/mode1 reset, the VRAM is
2640          * always assumed to be lost.
2641          */
2642         switch (amdgpu_asic_reset_method(adev)) {
2643         case AMD_RESET_METHOD_BACO:
2644         case AMD_RESET_METHOD_MODE1:
2645                 return true;
2646         default:
2647                 return false;
2648         }
2649 }
2650
2651 /**
2652  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2653  *
2654  * @adev: amdgpu_device pointer
2655  * @state: clockgating state (gate or ungate)
2656  *
2657  * The list of all the hardware IPs that make up the asic is walked and the
2658  * set_clockgating_state callbacks are run.
2659  * During the late init pass this enables clockgating for the hardware IPs;
2660  * during the fini or suspend pass it disables clockgating.
2661  * Returns 0 on success, negative error code on failure.
2662  */
2663
2664 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2665                                enum amd_clockgating_state state)
2666 {
2667         int i, j, r;
2668
2669         if (amdgpu_emu_mode == 1)
2670                 return 0;
2671
2672         for (j = 0; j < adev->num_ip_blocks; j++) {
2673                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2674                 if (!adev->ip_blocks[i].status.late_initialized)
2675                         continue;
2676                 /* skip CG for GFX, SDMA on S0ix */
2677                 if (adev->in_s0ix &&
2678                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2679                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2680                         continue;
2681                 /* skip CG for VCE/UVD, it's handled specially */
2682                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2683                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2684                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2685                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2686                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2687                         /* enable clockgating to save power */
2688                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2689                                                                                      state);
2690                         if (r) {
2691                                 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2692                                           adev->ip_blocks[i].version->funcs->name, r);
2693                                 return r;
2694                         }
2695                 }
2696         }
2697
2698         return 0;
2699 }
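/*
 * Example (illustrative note, not from the upstream source): the index
 * calculation above walks the IP list forward when gating and in reverse when
 * ungating, e.g. with 3 late-initialized blocks:
 *
 *   gate:   i = 0, 1, 2
 *   ungate: i = 2, 1, 0
 */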
2700
2701 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2702                                enum amd_powergating_state state)
2703 {
2704         int i, j, r;
2705
2706         if (amdgpu_emu_mode == 1)
2707                 return 0;
2708
2709         for (j = 0; j < adev->num_ip_blocks; j++) {
2710                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2711                 if (!adev->ip_blocks[i].status.late_initialized)
2712                         continue;
2713                 /* skip PG for GFX, SDMA on S0ix */
2714                 if (adev->in_s0ix &&
2715                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2716                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2717                         continue;
2718                 /* skip PG for VCE/UVD, it's handled specially */
2719                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2720                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2721                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2722                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2723                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2724                         /* enable powergating to save power */
2725                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2726                                                                                         state);
2727                         if (r) {
2728                                 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2729                                           adev->ip_blocks[i].version->funcs->name, r);
2730                                 return r;
2731                         }
2732                 }
2733         }
2734         return 0;
2735 }
2736
2737 static int amdgpu_device_enable_mgpu_fan_boost(void)
2738 {
2739         struct amdgpu_gpu_instance *gpu_ins;
2740         struct amdgpu_device *adev;
2741         int i, ret = 0;
2742
2743         mutex_lock(&mgpu_info.mutex);
2744
2745         /*
2746          * MGPU fan boost feature should be enabled
2747          * only when there are two or more dGPUs in
2748          * the system
2749          */
2750         if (mgpu_info.num_dgpu < 2)
2751                 goto out;
2752
2753         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2754                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2755                 adev = gpu_ins->adev;
2756                 if (!(adev->flags & AMD_IS_APU) &&
2757                     !gpu_ins->mgpu_fan_enabled) {
2758                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2759                         if (ret)
2760                                 break;
2761
2762                         gpu_ins->mgpu_fan_enabled = 1;
2763                 }
2764         }
2765
2766 out:
2767         mutex_unlock(&mgpu_info.mutex);
2768
2769         return ret;
2770 }
2771
2772 /**
2773  * amdgpu_device_ip_late_init - run late init for hardware IPs
2774  *
2775  * @adev: amdgpu_device pointer
2776  *
2777  * Late initialization pass for hardware IPs.  The list of all the hardware
2778  * IPs that make up the asic is walked and the late_init callbacks are run.
2779  * late_init covers any special initialization that an IP requires
2780  * after all of the IPs have been initialized or something that needs to happen
2781  * late in the init process.
2782  * Returns 0 on success, negative error code on failure.
2783  */
2784 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2785 {
2786         struct amdgpu_gpu_instance *gpu_instance;
2787         int i = 0, r;
2788
2789         for (i = 0; i < adev->num_ip_blocks; i++) {
2790                 if (!adev->ip_blocks[i].status.hw)
2791                         continue;
2792                 if (adev->ip_blocks[i].version->funcs->late_init) {
2793                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2794                         if (r) {
2795                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2796                                           adev->ip_blocks[i].version->funcs->name, r);
2797                                 return r;
2798                         }
2799                 }
2800                 adev->ip_blocks[i].status.late_initialized = true;
2801         }
2802
2803         r = amdgpu_ras_late_init(adev);
2804         if (r) {
2805                 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2806                 return r;
2807         }
2808
2809         amdgpu_ras_set_error_query_ready(adev, true);
2810
2811         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2812         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2813
2814         amdgpu_device_fill_reset_magic(adev);
2815
2816         r = amdgpu_device_enable_mgpu_fan_boost();
2817         if (r)
2818                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2819
2820         /* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
2821         if (amdgpu_passthrough(adev) &&
2822             ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2823              adev->asic_type == CHIP_ALDEBARAN))
2824                 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2825
2826         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2827                 mutex_lock(&mgpu_info.mutex);
2828
2829                 /*
2830                  * Reset the device p-state to low, as it was booted with high.
2831                  *
2832                  * This should be performed only after all devices from the same
2833                  * hive get initialized.
2834                  *
2835                  * However, the number of devices in the hive is not known in
2836                  * advance; it is counted one by one as the devices are initialized.
2837                  *
2838                  * So we wait until all XGMI interlinked devices are initialized.
2839                  * This may bring some delay as those devices may come from
2840                  * different hives. But that should be OK.
2841                  */
2842                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2843                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2844                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2845                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2846                                         continue;
2847
2848                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2849                                                 AMDGPU_XGMI_PSTATE_MIN);
2850                                 if (r) {
2851                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2852                                         break;
2853                                 }
2854                         }
2855                 }
2856
2857                 mutex_unlock(&mgpu_info.mutex);
2858         }
2859
2860         return 0;
2861 }
2862
2863 /**
2864  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2865  *
2866  * @adev: amdgpu_device pointer
2867  *
2868  * For ASICs that need to disable the SMC first
2869  */
2870 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2871 {
2872         int i, r;
2873
2874         if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2875                 return;
2876
2877         for (i = 0; i < adev->num_ip_blocks; i++) {
2878                 if (!adev->ip_blocks[i].status.hw)
2879                         continue;
2880                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2881                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2882                         /* XXX handle errors */
2883                         if (r) {
2884                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2885                                           adev->ip_blocks[i].version->funcs->name, r);
2886                         }
2887                         adev->ip_blocks[i].status.hw = false;
2888                         break;
2889                 }
2890         }
2891 }
2892
2893 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2894 {
2895         int i, r;
2896
2897         for (i = 0; i < adev->num_ip_blocks; i++) {
2898                 if (!adev->ip_blocks[i].version->funcs->early_fini)
2899                         continue;
2900
2901                 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2902                 if (r) {
2903                         DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2904                                   adev->ip_blocks[i].version->funcs->name, r);
2905                 }
2906         }
2907
2908         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2909         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2910
2911         amdgpu_amdkfd_suspend(adev, false);
2912
2913         /* Workaround for ASICs that need to disable the SMC first */
2914         amdgpu_device_smu_fini_early(adev);
2915
2916         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2917                 if (!adev->ip_blocks[i].status.hw)
2918                         continue;
2919
2920                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2921                 /* XXX handle errors */
2922                 if (r) {
2923                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2924                                   adev->ip_blocks[i].version->funcs->name, r);
2925                 }
2926
2927                 adev->ip_blocks[i].status.hw = false;
2928         }
2929
2930         if (amdgpu_sriov_vf(adev)) {
2931                 if (amdgpu_virt_release_full_gpu(adev, false))
2932                         DRM_ERROR("failed to release exclusive mode on fini\n");
2933         }
2934
2935         return 0;
2936 }
2937
2938 /**
2939  * amdgpu_device_ip_fini - run fini for hardware IPs
2940  *
2941  * @adev: amdgpu_device pointer
2942  *
2943  * Main teardown pass for hardware IPs.  The list of all the hardware
2944  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2945  * are run.  hw_fini tears down the hardware associated with each IP
2946  * and sw_fini tears down any software state associated with each IP.
2947  * Returns 0 on success, negative error code on failure.
2948  */
2949 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2950 {
2951         int i, r;
2952
2953         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2954                 amdgpu_virt_release_ras_err_handler_data(adev);
2955
2956         if (adev->gmc.xgmi.num_physical_nodes > 1)
2957                 amdgpu_xgmi_remove_device(adev);
2958
2959         amdgpu_amdkfd_device_fini_sw(adev);
2960
2961         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2962                 if (!adev->ip_blocks[i].status.sw)
2963                         continue;
2964
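                /* objects that live in GART/VRAM are freed just before the GMC block's sw_fini */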
2965                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2966                         amdgpu_ucode_free_bo(adev);
2967                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2968                         amdgpu_device_wb_fini(adev);
2969                         amdgpu_device_mem_scratch_fini(adev);
2970                         amdgpu_ib_pool_fini(adev);
2971                 }
2972
2973                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2974                 /* XXX handle errors */
2975                 if (r) {
2976                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2977                                   adev->ip_blocks[i].version->funcs->name, r);
2978                 }
2979                 adev->ip_blocks[i].status.sw = false;
2980                 adev->ip_blocks[i].status.valid = false;
2981         }
2982
2983         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2984                 if (!adev->ip_blocks[i].status.late_initialized)
2985                         continue;
2986                 if (adev->ip_blocks[i].version->funcs->late_fini)
2987                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2988                 adev->ip_blocks[i].status.late_initialized = false;
2989         }
2990
2991         amdgpu_ras_fini(adev);
2992
2993         return 0;
2994 }
2995
2996 /**
2997  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2998  *
2999  * @work: work_struct.
3000  */
3001 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
3002 {
3003         struct amdgpu_device *adev =
3004                 container_of(work, struct amdgpu_device, delayed_init_work.work);
3005         int r;
3006
3007         r = amdgpu_ib_ring_tests(adev);
3008         if (r)
3009                 DRM_ERROR("ib ring test failed (%d).\n", r);
3010 }
3011
3012 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
3013 {
3014         struct amdgpu_device *adev =
3015                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
3016
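        /* the delayed work should only run while GFXOFF is still disabled and no disable requests are pending */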
3017         WARN_ON_ONCE(adev->gfx.gfx_off_state);
3018         WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
3019
3020         if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
3021                 adev->gfx.gfx_off_state = true;
3022 }
3023
3024 /**
3025  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
3026  *
3027  * @adev: amdgpu_device pointer
3028  *
3029  * Main suspend function for hardware IPs.  The list of all the hardware
3030  * IPs that make up the asic is walked, clockgating is disabled and the
3031  * suspend callbacks are run.  suspend puts the hardware and software state
3032  * in each IP into a state suitable for suspend.
3033  * Returns 0 on success, negative error code on failure.
3034  */
3035 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
3036 {
3037         int i, r;
3038
3039         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3040         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3041
3042         /*
3043          * Per the PMFW team's suggestion, the driver needs to handle disabling
3044          * the gfxoff and df cstate features for gpu reset (e.g. Mode1Reset)
3045          * scenarios. Add the missing df cstate disablement here.
3046          */
3047         if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
3048                 dev_warn(adev->dev, "Failed to disallow df cstate");
3049
3050         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3051                 if (!adev->ip_blocks[i].status.valid)
3052                         continue;
3053
3054                 /* displays are handled separately */
3055                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
3056                         continue;
3057
3058                 /* XXX handle errors */
3059                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3060                 /* XXX handle errors */
3061                 if (r) {
3062                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
3063                                   adev->ip_blocks[i].version->funcs->name, r);
3064                         return r;
3065                 }
3066
3067                 adev->ip_blocks[i].status.hw = false;
3068         }
3069
3070         return 0;
3071 }
3072
3073 /**
3074  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3075  *
3076  * @adev: amdgpu_device pointer
3077  *
3078  * Main suspend function for hardware IPs.  The list of all the hardware
3079  * IPs that make up the asic is walked, clockgating is disabled and the
3080  * suspend callbacks are run.  suspend puts the hardware and software state
3081  * in each IP into a state suitable for suspend.
3082  * Returns 0 on success, negative error code on failure.
3083  */
3084 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3085 {
3086         int i, r;
3087
3088         if (adev->in_s0ix)
3089                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3090
3091         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3092                 if (!adev->ip_blocks[i].status.valid)
3093                         continue;
3094                 /* displays are handled in phase1 */
3095                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3096                         continue;
3097                 /* PSP lost connection when err_event_athub occurs */
3098                 if (amdgpu_ras_intr_triggered() &&
3099                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3100                         adev->ip_blocks[i].status.hw = false;
3101                         continue;
3102                 }
3103
3104                 /* skip unnecessary suspend if the blocks have not been initialized yet */
3105                 if (adev->gmc.xgmi.pending_reset &&
3106                     !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3107                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3108                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3109                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3110                         adev->ip_blocks[i].status.hw = false;
3111                         continue;
3112                 }
3113
3114                 /* skip suspend of gfx/mes and psp for S0ix
3115                  * gfx is in gfxoff state, so on resume it will exit gfxoff just
3116                  * like at runtime. PSP is also part of the always on hardware
3117                  * so no need to suspend it.
3118                  */
3119                 if (adev->in_s0ix &&
3120                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3121                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3122                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3123                         continue;
3124
3125                 /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3126                 if (adev->in_s0ix &&
3127                     (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) &&
3128                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
3129                         continue;
3130
3131                 /* During cold boot the PSP provides the IMU and RLC FW binaries to the TOS.
3132                  * These live in the TMR and are expected to be reused by PSP-TOS to reload
3133                  * from that location; RLC autoload is likewise triggered from there by the
3134                  * PMFW -> PSP message during the re-init sequence.
3135                  * Therefore, PSP suspend & resume should be skipped to avoid destroying
3136                  * the TMR and reloading the FWs again on IMU-enabled APU ASICs.
3137                  */
3138                 if (amdgpu_in_reset(adev) &&
3139                     (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3140                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3141                         continue;
3142
3143                 /* XXX handle errors */
3144                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3145                 /* XXX handle errors */
3146                 if (r) {
3147                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
3148                                   adev->ip_blocks[i].version->funcs->name, r);
3149                 }
3150                 adev->ip_blocks[i].status.hw = false;
3151                 /* handle putting the SMC in the appropriate state */
3152                 if (!amdgpu_sriov_vf(adev)) {
3153                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3154                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3155                                 if (r) {
3156                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3157                                                         adev->mp1_state, r);
3158                                         return r;
3159                                 }
3160                         }
3161                 }
3162         }
3163
3164         return 0;
3165 }
3166
3167 /**
3168  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3169  *
3170  * @adev: amdgpu_device pointer
3171  *
3172  * Main suspend function for hardware IPs.  The list of all the hardware
3173  * IPs that make up the asic is walked, clockgating is disabled and the
3174  * suspend callbacks are run.  suspend puts the hardware and software state
3175  * in each IP into a state suitable for suspend.
3176  * Returns 0 on success, negative error code on failure.
3177  */
3178 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3179 {
3180         int r;
3181
3182         if (amdgpu_sriov_vf(adev)) {
3183                 amdgpu_virt_fini_data_exchange(adev);
3184                 amdgpu_virt_request_full_gpu(adev, false);
3185         }
3186
3187         r = amdgpu_device_ip_suspend_phase1(adev);
3188         if (r)
3189                 return r;
3190         r = amdgpu_device_ip_suspend_phase2(adev);
3191
3192         if (amdgpu_sriov_vf(adev))
3193                 amdgpu_virt_release_full_gpu(adev, false);
3194
3195         return r;
3196 }
3197
3198 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3199 {
3200         int i, r;
3201
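        /* minimal set of IP blocks brought back first after an SR-IOV reset, in this order */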
3202         static enum amd_ip_block_type ip_order[] = {
3203                 AMD_IP_BLOCK_TYPE_COMMON,
3204                 AMD_IP_BLOCK_TYPE_GMC,
3205                 AMD_IP_BLOCK_TYPE_PSP,
3206                 AMD_IP_BLOCK_TYPE_IH,
3207         };
3208
3209         for (i = 0; i < adev->num_ip_blocks; i++) {
3210                 int j;
3211                 struct amdgpu_ip_block *block;
3212
3213                 block = &adev->ip_blocks[i];
3214                 block->status.hw = false;
3215
3216                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3217
3218                         if (block->version->type != ip_order[j] ||
3219                                 !block->status.valid)
3220                                 continue;
3221
3222                         r = block->version->funcs->hw_init(adev);
3223                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3224                         if (r)
3225                                 return r;
3226                         block->status.hw = true;
3227                 }
3228         }
3229
3230         return 0;
3231 }
3232
3233 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3234 {
3235         int i, r;
3236
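        /* remaining IP blocks, re-initialized after the early set, in this order */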
3237         static enum amd_ip_block_type ip_order[] = {
3238                 AMD_IP_BLOCK_TYPE_SMC,
3239                 AMD_IP_BLOCK_TYPE_DCE,
3240                 AMD_IP_BLOCK_TYPE_GFX,
3241                 AMD_IP_BLOCK_TYPE_SDMA,
3242                 AMD_IP_BLOCK_TYPE_MES,
3243                 AMD_IP_BLOCK_TYPE_UVD,
3244                 AMD_IP_BLOCK_TYPE_VCE,
3245                 AMD_IP_BLOCK_TYPE_VCN,
3246                 AMD_IP_BLOCK_TYPE_JPEG
3247         };
3248
3249         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3250                 int j;
3251                 struct amdgpu_ip_block *block;
3252
3253                 for (j = 0; j < adev->num_ip_blocks; j++) {
3254                         block = &adev->ip_blocks[j];
3255
3256                         if (block->version->type != ip_order[i] ||
3257                                 !block->status.valid ||
3258                                 block->status.hw)
3259                                 continue;
3260
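                        /* the SMC block is brought back via resume(); everything else via hw_init() */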
3261                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3262                                 r = block->version->funcs->resume(adev);
3263                         else
3264                                 r = block->version->funcs->hw_init(adev);
3265
3266                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3267                         if (r)
3268                                 return r;
3269                         block->status.hw = true;
3270                 }
3271         }
3272
3273         return 0;
3274 }
3275
3276 /**
3277  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3278  *
3279  * @adev: amdgpu_device pointer
3280  *
3281  * First resume function for hardware IPs.  The list of all the hardware
3282  * IPs that make up the asic is walked and the resume callbacks are run for
3283  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3284  * after a suspend and updates the software state as necessary.  This
3285  * function is also used for restoring the GPU after a GPU reset.
3286  * Returns 0 on success, negative error code on failure.
3287  */
3288 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3289 {
3290         int i, r;
3291
3292         for (i = 0; i < adev->num_ip_blocks; i++) {
3293                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3294                         continue;
3295                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3296                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3297                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3298                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3299
3300                         r = adev->ip_blocks[i].version->funcs->resume(adev);
3301                         if (r) {
3302                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
3303                                           adev->ip_blocks[i].version->funcs->name, r);
3304                                 return r;
3305                         }
3306                         adev->ip_blocks[i].status.hw = true;
3307                 }
3308         }
3309
3310         return 0;
3311 }
3312
3313 /**
3314  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3315  *
3316  * @adev: amdgpu_device pointer
3317  *
3318  * Second resume function for hardware IPs.  The list of all the hardware
3319  * IPs that make up the asic is walked and the resume callbacks are run for
3320  * all blocks except COMMON, GMC, IH, and PSP.  resume puts the hardware into a
3321  * functional state after a suspend and updates the software state as
3322  * necessary.  This function is also used for restoring the GPU after a GPU
3323  * reset.
3324  * Returns 0 on success, negative error code on failure.
3325  */
3326 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3327 {
3328         int i, r;
3329
3330         for (i = 0; i < adev->num_ip_blocks; i++) {
3331                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3332                         continue;
3333                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3334                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3335                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3336                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3337                         continue;
3338                 r = adev->ip_blocks[i].version->funcs->resume(adev);
3339                 if (r) {
3340                         DRM_ERROR("resume of IP block <%s> failed %d\n",
3341                                   adev->ip_blocks[i].version->funcs->name, r);
3342                         return r;
3343                 }
3344                 adev->ip_blocks[i].status.hw = true;
3345         }
3346
3347         return 0;
3348 }
3349
3350 /**
3351  * amdgpu_device_ip_resume - run resume for hardware IPs
3352  *
3353  * @adev: amdgpu_device pointer
3354  *
3355  * Main resume function for hardware IPs.  The hardware IPs
3356  * are split into two resume functions because they are
3357  * also used in recovering from a GPU reset and some additional
3358  * steps need to be taken between them.  In this case (S3/S4) they are
3359  * run sequentially.
3360  * Returns 0 on success, negative error code on failure.
3361  */
3362 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3363 {
3364         int r;
3365
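        /* phase1 brings up COMMON/GMC/IH, firmware is loaded, then phase2 restores the remaining blocks */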
3366         r = amdgpu_device_ip_resume_phase1(adev);
3367         if (r)
3368                 return r;
3369
3370         r = amdgpu_device_fw_loading(adev);
3371         if (r)
3372                 return r;
3373
3374         r = amdgpu_device_ip_resume_phase2(adev);
3375
3376         return r;
3377 }
3378
3379 /**
3380  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3381  *
3382  * @adev: amdgpu_device pointer
3383  *
3384  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3385  */
3386 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3387 {
3388         if (amdgpu_sriov_vf(adev)) {
3389                 if (adev->is_atom_fw) {
3390                         if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3391                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3392                 } else {
3393                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3394                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3395                 }
3396
3397                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3398                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3399         }
3400 }
3401
3402 /**
3403  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3404  *
3405  * @asic_type: AMD asic type
3406  *
3407  * Check if there is DC (new modesetting infrastructure) support for an asic.
3408  * Returns true if DC has support, false if not.
3409  */
3410 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3411 {
3412         switch (asic_type) {
3413 #ifdef CONFIG_DRM_AMDGPU_SI
3414         case CHIP_HAINAN:
3415 #endif
3416         case CHIP_TOPAZ:
3417                 /* chips with no display hardware */
3418                 return false;
3419 #if defined(CONFIG_DRM_AMD_DC)
3420         case CHIP_TAHITI:
3421         case CHIP_PITCAIRN:
3422         case CHIP_VERDE:
3423         case CHIP_OLAND:
3424                 /*
3425                  * We have systems in the wild with these ASICs that require
3426                  * LVDS and VGA support which is not supported with DC.
3427                  *
3428                  * Fallback to the non-DC driver here by default so as not to
3429                  * cause regressions.
3430                  */
3431 #if defined(CONFIG_DRM_AMD_DC_SI)
3432                 return amdgpu_dc > 0;
3433 #else
3434                 return false;
3435 #endif
3436         case CHIP_BONAIRE:
3437         case CHIP_KAVERI:
3438         case CHIP_KABINI:
3439         case CHIP_MULLINS:
3440                 /*
3441                  * We have systems in the wild with these ASICs that require
3442                  * VGA support which is not supported with DC.
3443                  *
3444                  * Fallback to the non-DC driver here by default so as not to
3445                  * cause regressions.
3446                  */
3447                 return amdgpu_dc > 0;
3448         default:
3449                 return amdgpu_dc != 0;
3450 #else
3451         default:
3452                 if (amdgpu_dc > 0)
3453                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
3454                 return false;
3455 #endif
3456         }
3457 }
3458
3459 /**
3460  * amdgpu_device_has_dc_support - check if dc is supported
3461  *
3462  * @adev: amdgpu_device pointer
3463  *
3464  * Returns true for supported, false for not supported
3465  */
3466 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3467 {
3468         if (adev->enable_virtual_display ||
3469             (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3470                 return false;
3471
3472         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3473 }
3474
3475 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3476 {
3477         struct amdgpu_device *adev =
3478                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3479         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3480
3481         /* It's a bug to not have a hive within this function */
3482         if (WARN_ON(!hive))
3483                 return;
3484
3485         /*
3486          * Use task barrier to synchronize all xgmi reset works across the
3487          * hive. task_barrier_enter and task_barrier_exit will block
3488          * until all the threads running the xgmi reset works reach
3489          * those points. task_barrier_full will do both blocks.
3490          */
3491         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3492
3493                 task_barrier_enter(&hive->tb);
3494                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3495
3496                 if (adev->asic_reset_res)
3497                         goto fail;
3498
3499                 task_barrier_exit(&hive->tb);
3500                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3501
3502                 if (adev->asic_reset_res)
3503                         goto fail;
3504
3505                 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3506                     adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3507                         adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3508         } else {
3509
3510                 task_barrier_full(&hive->tb);
3511                 adev->asic_reset_res =  amdgpu_asic_reset(adev);
3512         }
3513
3514 fail:
3515         if (adev->asic_reset_res)
3516                 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3517                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3518         amdgpu_put_xgmi_hive(hive);
3519 }
3520
3521 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3522 {
3523         char *input = amdgpu_lockup_timeout;
3524         char *timeout_setting = NULL;
3525         int index = 0;
3526         long timeout;
3527         int ret = 0;
3528
3529         /*
3530          * By default, the timeout is 10000 ms for non-compute jobs
3531          * and 60000 ms for compute jobs.
3532          * Under SR-IOV, compute jobs get 60000 ms only when powerplay
3533          * one-VF mode is in use, and 10000 ms otherwise.
3534          */
3535         adev->gfx_timeout = msecs_to_jiffies(10000);
3536         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3537         if (amdgpu_sriov_vf(adev))
3538                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3539                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3540         else
3541                 adev->compute_timeout =  msecs_to_jiffies(60000);
3542
3543         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3544                 while ((timeout_setting = strsep(&input, ",")) &&
3545                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3546                         ret = kstrtol(timeout_setting, 0, &timeout);
3547                         if (ret)
3548                                 return ret;
3549
3550                         if (timeout == 0) {
3551                                 index++;
3552                                 continue;
3553                         } else if (timeout < 0) {
3554                                 timeout = MAX_SCHEDULE_TIMEOUT;
3555                                 dev_warn(adev->dev, "lockup timeout disabled");
3556                                 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3557                         } else {
3558                                 timeout = msecs_to_jiffies(timeout);
3559                         }
3560
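                        /* values are consumed in order: gfx, compute, sdma, video,
                         * e.g. amdgpu.lockup_timeout=10000,60000,10000,10000
                         */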
3561                         switch (index++) {
3562                         case 0:
3563                                 adev->gfx_timeout = timeout;
3564                                 break;
3565                         case 1:
3566                                 adev->compute_timeout = timeout;
3567                                 break;
3568                         case 2:
3569                                 adev->sdma_timeout = timeout;
3570                                 break;
3571                         case 3:
3572                                 adev->video_timeout = timeout;
3573                                 break;
3574                         default:
3575                                 break;
3576                         }
3577                 }
3578                 /*
3579                  * There is only one value specified and
3580                  * it should apply to all non-compute jobs.
3581                  */
3582                 if (index == 1) {
3583                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3584                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3585                                 adev->compute_timeout = adev->gfx_timeout;
3586                 }
3587         }
3588
3589         return ret;
3590 }
3591
3592 /**
3593  * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3594  *
3595  * @adev: amdgpu_device pointer
3596  *
3597  * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3598  */
3599 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3600 {
3601         struct iommu_domain *domain;
3602
3603         domain = iommu_get_domain_for_dev(adev->dev);
3604         if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3605                 adev->ram_is_direct_mapped = true;
3606 }
3607
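/* device-level sysfs attributes registered from amdgpu_device_init() */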
3608 static const struct attribute *amdgpu_dev_attributes[] = {
3609         &dev_attr_pcie_replay_count.attr,
3610         NULL
3611 };
3612
3613 static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
3614 {
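        /* the amdgpu_mcbp module parameter takes precedence; otherwise MCBP is
         * enabled by default on gfx9 parts with gfx rings, and always under SR-IOV
         */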
3615         if (amdgpu_mcbp == 1)
3616                 adev->gfx.mcbp = true;
3617         else if (amdgpu_mcbp == 0)
3618                 adev->gfx.mcbp = false;
3619         else if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) &&
3620                  (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) &&
3621                  adev->gfx.num_gfx_rings)
3622                 adev->gfx.mcbp = true;
3623
3624         if (amdgpu_sriov_vf(adev))
3625                 adev->gfx.mcbp = true;
3626
3627         if (adev->gfx.mcbp)
3628                 DRM_INFO("MCBP is enabled\n");
3629 }
3630
3631 /**
3632  * amdgpu_device_init - initialize the driver
3633  *
3634  * @adev: amdgpu_device pointer
3635  * @flags: driver flags
3636  *
3637  * Initializes the driver info and hw (all asics).
3638  * Returns 0 for success or an error on failure.
3639  * Called at driver startup.
3640  */
3641 int amdgpu_device_init(struct amdgpu_device *adev,
3642                        uint32_t flags)
3643 {
3644         struct drm_device *ddev = adev_to_drm(adev);
3645         struct pci_dev *pdev = adev->pdev;
3646         int r, i;
3647         bool px = false;
3648         u32 max_MBps;
3649         int tmp;
3650
3651         adev->shutdown = false;
3652         adev->flags = flags;
3653
3654         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3655                 adev->asic_type = amdgpu_force_asic_type;
3656         else
3657                 adev->asic_type = flags & AMD_ASIC_MASK;
3658
3659         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3660         if (amdgpu_emu_mode == 1)
3661                 adev->usec_timeout *= 10;
3662         adev->gmc.gart_size = 512 * 1024 * 1024;
3663         adev->accel_working = false;
3664         adev->num_rings = 0;
3665         RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3666         adev->mman.buffer_funcs = NULL;
3667         adev->mman.buffer_funcs_ring = NULL;
3668         adev->vm_manager.vm_pte_funcs = NULL;
3669         adev->vm_manager.vm_pte_num_scheds = 0;
3670         adev->gmc.gmc_funcs = NULL;
3671         adev->harvest_ip_mask = 0x0;
3672         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3673         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3674
3675         adev->smc_rreg = &amdgpu_invalid_rreg;
3676         adev->smc_wreg = &amdgpu_invalid_wreg;
3677         adev->pcie_rreg = &amdgpu_invalid_rreg;
3678         adev->pcie_wreg = &amdgpu_invalid_wreg;
3679         adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
3680         adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
3681         adev->pciep_rreg = &amdgpu_invalid_rreg;
3682         adev->pciep_wreg = &amdgpu_invalid_wreg;
3683         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3684         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3685         adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext;
3686         adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext;
3687         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3688         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3689         adev->didt_rreg = &amdgpu_invalid_rreg;
3690         adev->didt_wreg = &amdgpu_invalid_wreg;
3691         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3692         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3693         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3694         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3695
3696         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3697                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3698                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3699
3700         /* mutex initialization is all done here so we
3701          * can call these functions again without locking issues
3702          */
3703         mutex_init(&adev->firmware.mutex);
3704         mutex_init(&adev->pm.mutex);
3705         mutex_init(&adev->gfx.gpu_clock_mutex);
3706         mutex_init(&adev->srbm_mutex);
3707         mutex_init(&adev->gfx.pipe_reserve_mutex);
3708         mutex_init(&adev->gfx.gfx_off_mutex);
3709         mutex_init(&adev->gfx.partition_mutex);
3710         mutex_init(&adev->grbm_idx_mutex);
3711         mutex_init(&adev->mn_lock);
3712         mutex_init(&adev->virt.vf_errors.lock);
3713         hash_init(adev->mn_hash);
3714         mutex_init(&adev->psp.mutex);
3715         mutex_init(&adev->notifier_lock);
3716         mutex_init(&adev->pm.stable_pstate_ctx_lock);
3717         mutex_init(&adev->benchmark_mutex);
3718
3719         amdgpu_device_init_apu_flags(adev);
3720
3721         r = amdgpu_device_check_arguments(adev);
3722         if (r)
3723                 return r;
3724
3725         spin_lock_init(&adev->mmio_idx_lock);
3726         spin_lock_init(&adev->smc_idx_lock);
3727         spin_lock_init(&adev->pcie_idx_lock);
3728         spin_lock_init(&adev->uvd_ctx_idx_lock);
3729         spin_lock_init(&adev->didt_idx_lock);
3730         spin_lock_init(&adev->gc_cac_idx_lock);
3731         spin_lock_init(&adev->se_cac_idx_lock);
3732         spin_lock_init(&adev->audio_endpt_idx_lock);
3733         spin_lock_init(&adev->mm_stats.lock);
3734
3735         INIT_LIST_HEAD(&adev->shadow_list);
3736         mutex_init(&adev->shadow_list_lock);
3737
3738         INIT_LIST_HEAD(&adev->reset_list);
3739
3740         INIT_LIST_HEAD(&adev->ras_list);
3741
3742         INIT_LIST_HEAD(&adev->pm.od_kobj_list);
3743
3744         INIT_DELAYED_WORK(&adev->delayed_init_work,
3745                           amdgpu_device_delayed_init_work_handler);
3746         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3747                           amdgpu_device_delay_enable_gfx_off);
3748
3749         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3750
3751         adev->gfx.gfx_off_req_count = 1;
3752         adev->gfx.gfx_off_residency = 0;
3753         adev->gfx.gfx_off_entrycount = 0;
3754         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3755
3756         atomic_set(&adev->throttling_logging_enabled, 1);
3757         /*
3758          * If throttling continues, logging will be performed every minute
3759          * to avoid log flooding. "-1" is subtracted since the thermal
3760          * throttling interrupt comes every second. Thus, the total logging
3761          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3762          * for throttling interrupt) = 60 seconds.
3763          */
3764         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3765         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3766
3767         /* Registers mapping */
3768         /* TODO: block userspace mapping of io register */
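        /* CHIP_BONAIRE and newer expose the register aperture in BAR 5, older ASICs in BAR 2 */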
3769         if (adev->asic_type >= CHIP_BONAIRE) {
3770                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3771                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3772         } else {
3773                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3774                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3775         }
3776
3777         for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3778                 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3779
3780         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3781         if (!adev->rmmio)
3782                 return -ENOMEM;
3783
3784         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3785         DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
3786
3787         /*
3788          * The reset domain needs to be present early, before the XGMI hive is
3789          * discovered (if any) and initialized, so that the reset sem and in_gpu_reset
3790          * flag can be used early during init and before any call to RREG32.
3791          */
3792         adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3793         if (!adev->reset_domain)
3794                 return -ENOMEM;
3795
3796         /* detect hw virtualization here */
3797         amdgpu_detect_virtualization(adev);
3798
3799         amdgpu_device_get_pcie_info(adev);
3800
3801         r = amdgpu_device_get_job_timeout_settings(adev);
3802         if (r) {
3803                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3804                 return r;
3805         }
3806
3807         /* early init functions */
3808         r = amdgpu_device_ip_early_init(adev);
3809         if (r)
3810                 return r;
3811
3812         amdgpu_device_set_mcbp(adev);
3813
3814         /* Get rid of things like offb */
3815         r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
3816         if (r)
3817                 return r;
3818
3819         /* Enable TMZ based on IP_VERSION */
3820         amdgpu_gmc_tmz_set(adev);
3821
3822         amdgpu_gmc_noretry_set(adev);
3823         /* Need to get xgmi info early to decide the reset behavior */
3824         if (adev->gmc.xgmi.supported) {
3825                 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3826                 if (r)
3827                         return r;
3828         }
3829
3830         /* enable PCIE atomic ops */
3831         if (amdgpu_sriov_vf(adev)) {
3832                 if (adev->virt.fw_reserve.p_pf2vf)
3833                         adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3834                                                       adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3835                                 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3836         /* APUs with gfx9 and newer don't rely on PCIe atomics; their internal
3837          * path natively supports atomics, so set have_atomics_support to true.
3838          */
3839         } else if ((adev->flags & AMD_IS_APU) &&
3840                    (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))) {
3841                 adev->have_atomics_support = true;
3842         } else {
3843                 adev->have_atomics_support =
3844                         !pci_enable_atomic_ops_to_root(adev->pdev,
3845                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3846                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3847         }
3848
3849         if (!adev->have_atomics_support)
3850                 dev_info(adev->dev, "PCIE atomic ops is not supported\n");
3851
3852         /* doorbell BAR mapping and doorbell index init */
3853         amdgpu_doorbell_init(adev);
3854
3855         if (amdgpu_emu_mode == 1) {
3856                 /* post the asic on emulation mode */
3857                 emu_soc_asic_init(adev);
3858                 goto fence_driver_init;
3859         }
3860
3861         amdgpu_reset_init(adev);
3862
3863         /* detect whether we are running with an SR-IOV vBIOS */
3864         if (adev->bios)
3865                 amdgpu_device_detect_sriov_bios(adev);
3866
3867         /* check if we need to reset the asic
3868          *  E.g., driver was not cleanly unloaded previously, etc.
3869          */
3870         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3871                 if (adev->gmc.xgmi.num_physical_nodes) {
3872                         dev_info(adev->dev, "Pending hive reset.\n");
3873                         adev->gmc.xgmi.pending_reset = true;
3874                         /* Only need to init the blocks necessary for the SMU to handle the reset */
3875                         for (i = 0; i < adev->num_ip_blocks; i++) {
3876                                 if (!adev->ip_blocks[i].status.valid)
3877                                         continue;
3878                                 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3879                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3880                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3881                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3882                                         DRM_DEBUG("IP %s disabled for hw_init.\n",
3883                                                 adev->ip_blocks[i].version->funcs->name);
3884                                         adev->ip_blocks[i].status.hw = true;
3885                                 }
3886                         }
3887                 } else {
3888                         tmp = amdgpu_reset_method;
3889                         /* It should do a default reset when loading or reloading the driver,
3890                          * regardless of the module parameter reset_method.
3891                          */
3892                         amdgpu_reset_method = AMD_RESET_METHOD_NONE;
3893                         r = amdgpu_asic_reset(adev);
3894                         amdgpu_reset_method = tmp;
3895                         if (r) {
3896                                 dev_err(adev->dev, "asic reset on init failed\n");
3897                                 goto failed;
3898                         }
3899                 }
3900         }
3901
3902         /* Post card if necessary */
3903         if (amdgpu_device_need_post(adev)) {
3904                 if (!adev->bios) {
3905                         dev_err(adev->dev, "no vBIOS found\n");
3906                         r = -EINVAL;
3907                         goto failed;
3908                 }
3909                 DRM_INFO("GPU posting now...\n");
3910                 r = amdgpu_device_asic_init(adev);
3911                 if (r) {
3912                         dev_err(adev->dev, "gpu post error!\n");
3913                         goto failed;
3914                 }
3915         }
3916
3917         if (adev->bios) {
3918                 if (adev->is_atom_fw) {
3919                         /* Initialize clocks */
3920                         r = amdgpu_atomfirmware_get_clock_info(adev);
3921                         if (r) {
3922                                 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3923                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3924                                 goto failed;
3925                         }
3926                 } else {
3927                         /* Initialize clocks */
3928                         r = amdgpu_atombios_get_clock_info(adev);
3929                         if (r) {
3930                                 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3931                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3932                                 goto failed;
3933                         }
3934                         /* init i2c buses */
3935                         if (!amdgpu_device_has_dc_support(adev))
3936                                 amdgpu_atombios_i2c_init(adev);
3937                 }
3938         }
3939
3940 fence_driver_init:
3941         /* Fence driver */
3942         r = amdgpu_fence_driver_sw_init(adev);
3943         if (r) {
3944                 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3945                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3946                 goto failed;
3947         }
3948
3949         /* init the mode config */
3950         drm_mode_config_init(adev_to_drm(adev));
3951
3952         r = amdgpu_device_ip_init(adev);
3953         if (r) {
3954                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3955                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3956                 goto release_ras_con;
3957         }
3958
3959         amdgpu_fence_driver_hw_init(adev);
3960
3961         dev_info(adev->dev,
3962                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3963                         adev->gfx.config.max_shader_engines,
3964                         adev->gfx.config.max_sh_per_se,
3965                         adev->gfx.config.max_cu_per_sh,
3966                         adev->gfx.cu_info.number);
3967
3968         adev->accel_working = true;
3969
3970         amdgpu_vm_check_compute_bug(adev);
3971
3972         /* Initialize the buffer migration limit. */
3973         if (amdgpu_moverate >= 0)
3974                 max_MBps = amdgpu_moverate;
3975         else
3976                 max_MBps = 8; /* Allow 8 MB/s. */
3977         /* Get a log2 for easy divisions. */
3978         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3979
3980         /*
3981          * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3982          * Otherwise the mgpu fan boost feature will be skipped because the
3983          * gpu instance count would come up short.
3984          */
3985         amdgpu_register_gpu_instance(adev);
3986
3987         /* enable clockgating, etc. after ib tests, etc. since some blocks require
3988          * explicit gating rather than handling it automatically.
3989          */
3990         if (!adev->gmc.xgmi.pending_reset) {
3991                 r = amdgpu_device_ip_late_init(adev);
3992                 if (r) {
3993                         dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3994                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3995                         goto release_ras_con;
3996                 }
3997                 /* must succeed. */
3998                 amdgpu_ras_resume(adev);
3999                 queue_delayed_work(system_wq, &adev->delayed_init_work,
4000                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
4001         }
4002
4003         if (amdgpu_sriov_vf(adev)) {
4004                 amdgpu_virt_release_full_gpu(adev, true);
4005                 flush_delayed_work(&adev->delayed_init_work);
4006         }
4007
4008         /*
4009          * Register these sysfs interfaces after `late_init`, as some of the
4010          * operations performed in `late_init` may affect how the sysfs
4011          * interfaces are created.
4012          */
4013         r = amdgpu_atombios_sysfs_init(adev);
4014         if (r)
4015                 drm_err(&adev->ddev,
4016                         "registering atombios sysfs failed (%d).\n", r);
4017
4018         r = amdgpu_pm_sysfs_init(adev);
4019         if (r)
4020                 DRM_ERROR("registering pm sysfs failed (%d).\n", r);
4021
4022         r = amdgpu_ucode_sysfs_init(adev);
4023         if (r) {
4024                 adev->ucode_sysfs_en = false;
4025                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
4026         } else
4027                 adev->ucode_sysfs_en = true;
4028
4029         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
4030         if (r)
4031                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
4032
4033         amdgpu_fru_sysfs_init(adev);
4034
4035         if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
4036                 r = amdgpu_pmu_init(adev);
4037                 if (r)
4038                         dev_err(adev->dev, "amdgpu_pmu_init failed\n");
        }
4039
4040         /* Keep the stored PCI config space at hand for restore after a sudden PCI error */
4041         if (amdgpu_device_cache_pci_state(adev->pdev))
4042                 pci_restore_state(pdev);
4043
4044         /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
4045         /* this will fail for cards that aren't VGA class devices, just
4046          * ignore it
4047          */
4048         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4049                 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
4050
4051         px = amdgpu_device_supports_px(ddev);
4052
4053         if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
4054                                 apple_gmux_detect(NULL, NULL)))
4055                 vga_switcheroo_register_client(adev->pdev,
4056                                                &amdgpu_switcheroo_ops, px);
4057
4058         if (px)
4059                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
4060
4061         if (adev->gmc.xgmi.pending_reset)
4062                 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
4063                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
4064
4065         amdgpu_device_check_iommu_direct_map(adev);
4066
4067         return 0;
4068
4069 release_ras_con:
4070         if (amdgpu_sriov_vf(adev))
4071                 amdgpu_virt_release_full_gpu(adev, true);
4072
4073         /* failed in exclusive mode due to timeout */
4074         if (amdgpu_sriov_vf(adev) &&
4075                 !amdgpu_sriov_runtime(adev) &&
4076                 amdgpu_virt_mmio_blocked(adev) &&
4077                 !amdgpu_virt_wait_reset(adev)) {
4078                 dev_err(adev->dev, "VF exclusive mode timeout\n");
4079                 /* Don't send request since VF is inactive. */
4080                 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
4081                 adev->virt.ops = NULL;
4082                 r = -EAGAIN;
4083         }
4084         amdgpu_release_ras_context(adev);
4085
4086 failed:
4087         amdgpu_vf_error_trans_all(adev);
4088
4089         return r;
4090 }
4091
4092 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4093 {
4094
4095         /* Clear all CPU mappings pointing to this device */
4096         unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4097
4098         /* Unmap all mapped bars - Doorbell, registers and VRAM */
4099         amdgpu_doorbell_fini(adev);
4100
4101         iounmap(adev->rmmio);
4102         adev->rmmio = NULL;
4103         if (adev->mman.aper_base_kaddr)
4104                 iounmap(adev->mman.aper_base_kaddr);
4105         adev->mman.aper_base_kaddr = NULL;
4106
4107         /* Memory manager related */
4108         if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4109                 arch_phys_wc_del(adev->gmc.vram_mtrr);
4110                 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4111         }
4112 }
4113
4114 /**
4115  * amdgpu_device_fini_hw - tear down the driver
4116  *
4117  * @adev: amdgpu_device pointer
4118  *
4119  * Tear down the driver info (all asics).
4120  * Called at driver shutdown.
4121  */
4122 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4123 {
4124         dev_info(adev->dev, "amdgpu: finishing device.\n");
4125         flush_delayed_work(&adev->delayed_init_work);
4126         adev->shutdown = true;
4127
4128         /* make sure the IB tests have finished before entering exclusive mode
4129          * to avoid preempting the IB tests
4130          */
4131         if (amdgpu_sriov_vf(adev)) {
4132                 amdgpu_virt_request_full_gpu(adev, false);
4133                 amdgpu_virt_fini_data_exchange(adev);
4134         }
4135
4136         /* disable all interrupts */
4137         amdgpu_irq_disable_all(adev);
4138         if (adev->mode_info.mode_config_initialized) {
4139                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4140                         drm_helper_force_disable_all(adev_to_drm(adev));
4141                 else
4142                         drm_atomic_helper_shutdown(adev_to_drm(adev));
4143         }
4144         amdgpu_fence_driver_hw_fini(adev);
4145
4146         if (adev->mman.initialized)
4147                 drain_workqueue(adev->mman.bdev.wq);
4148
4149         if (adev->pm.sysfs_initialized)
4150                 amdgpu_pm_sysfs_fini(adev);
4151         if (adev->ucode_sysfs_en)
4152                 amdgpu_ucode_sysfs_fini(adev);
4153         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4154         amdgpu_fru_sysfs_fini(adev);
4155
4156         /* RAS features must be disabled before hw fini */
4157         amdgpu_ras_pre_fini(adev);
4158
4159         amdgpu_device_ip_fini_early(adev);
4160
4161         amdgpu_irq_fini_hw(adev);
4162
4163         if (adev->mman.initialized)
4164                 ttm_device_clear_dma_mappings(&adev->mman.bdev);
4165
4166         amdgpu_gart_dummy_page_fini(adev);
4167
4168         if (drm_dev_is_unplugged(adev_to_drm(adev)))
4169                 amdgpu_device_unmap_mmio(adev);
4170
4171 }
4172
4173 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4174 {
4175         int idx;
4176         bool px;
4177
4178         amdgpu_fence_driver_sw_fini(adev);
4179         amdgpu_device_ip_fini(adev);
4180         amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4181         adev->accel_working = false;
4182         dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4183
4184         amdgpu_reset_fini(adev);
4185
4186         /* free i2c buses */
4187         if (!amdgpu_device_has_dc_support(adev))
4188                 amdgpu_i2c_fini(adev);
4189
4190         if (amdgpu_emu_mode != 1)
4191                 amdgpu_atombios_fini(adev);
4192
4193         kfree(adev->bios);
4194         adev->bios = NULL;
4195
4196         px = amdgpu_device_supports_px(adev_to_drm(adev));
4197
4198         if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
4199                                 apple_gmux_detect(NULL, NULL)))
4200                 vga_switcheroo_unregister_client(adev->pdev);
4201
4202         if (px)
4203                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4204
4205         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4206                 vga_client_unregister(adev->pdev);
4207
4208         if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4209
4210                 iounmap(adev->rmmio);
4211                 adev->rmmio = NULL;
4212                 amdgpu_doorbell_fini(adev);
4213                 drm_dev_exit(idx);
4214         }
4215
4216         if (IS_ENABLED(CONFIG_PERF_EVENTS))
4217                 amdgpu_pmu_fini(adev);
4218         if (adev->mman.discovery_bin)
4219                 amdgpu_discovery_fini(adev);
4220
4221         amdgpu_reset_put_reset_domain(adev->reset_domain);
4222         adev->reset_domain = NULL;
4223
4224         kfree(adev->pci_state);
4225
4226 }
4227
4228 /**
4229  * amdgpu_device_evict_resources - evict device resources
4230  * @adev: amdgpu device object
4231  *
4232  * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4233  * of the vram memory type. Mainly used for evicting device resources
4234  * at suspend time.
4235  *
4236  */
4237 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4238 {
4239         int ret;
4240
4241         /* No need to evict vram on APUs for suspend to ram or s2idle */
4242         if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4243                 return 0;
4244
4245         ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4246         if (ret)
4247                 DRM_WARN("evicting device resources failed\n");
4248         return ret;
4249 }
4250
4251 /*
4252  * Suspend & resume.
4253  */
4254 /**
4255  * amdgpu_device_suspend - initiate device suspend
4256  *
4257  * @dev: drm dev pointer
4258  * @fbcon: notify the fbdev of suspend
4259  *
4260  * Puts the hw in the suspend state (all asics).
4261  * Returns 0 for success or an error on failure.
4262  * Called at driver suspend.
4263  */
4264 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4265 {
4266         struct amdgpu_device *adev = drm_to_adev(dev);
4267         int r = 0;
4268
4269         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4270                 return 0;
4271
4272         adev->in_suspend = true;
4273
4274         /* Evict the majority of BOs before grabbing full GPU access */
4275         r = amdgpu_device_evict_resources(adev);
4276         if (r)
4277                 return r;
4278
4279         if (amdgpu_sriov_vf(adev)) {
4280                 amdgpu_virt_fini_data_exchange(adev);
4281                 r = amdgpu_virt_request_full_gpu(adev, false);
4282                 if (r)
4283                         return r;
4284         }
4285
4286         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4287                 DRM_WARN("smart shift update failed\n");
4288
4289         if (fbcon)
4290                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4291
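             /* cancel delayed init and flush any pending GFXOFF work before suspending the IPs */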
4292         cancel_delayed_work_sync(&adev->delayed_init_work);
4293         flush_delayed_work(&adev->gfx.gfx_off_delay_work);
4294
4295         amdgpu_ras_suspend(adev);
4296
4297         amdgpu_device_ip_suspend_phase1(adev);
4298
4299         if (!adev->in_s0ix)
4300                 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4301
4302         r = amdgpu_device_evict_resources(adev);
4303         if (r)
4304                 return r;
4305
4306         amdgpu_fence_driver_hw_fini(adev);
4307
4308         amdgpu_device_ip_suspend_phase2(adev);
4309
4310         if (amdgpu_sriov_vf(adev))
4311                 amdgpu_virt_release_full_gpu(adev, false);
4312
4313         return 0;
4314 }
4315
4316 /**
4317  * amdgpu_device_resume - initiate device resume
4318  *
4319  * @dev: drm dev pointer
4320  * @fbcon: notify the fbdev of resume
4321  *
4322  * Bring the hw back to operating state (all asics).
4323  * Returns 0 for success or an error on failure.
4324  * Called at driver resume.
4325  */
4326 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4327 {
4328         struct amdgpu_device *adev = drm_to_adev(dev);
4329         int r = 0;
4330
4331         if (amdgpu_sriov_vf(adev)) {
4332                 r = amdgpu_virt_request_full_gpu(adev, true);
4333                 if (r)
4334                         return r;
4335         }
4336
4337         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4338                 return 0;
4339
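             /* when resuming from S0ix, notify power management of the gfx D0 entry */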
4340         if (adev->in_s0ix)
4341                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4342
4343         /* post card */
4344         if (amdgpu_device_need_post(adev)) {
4345                 r = amdgpu_device_asic_init(adev);
4346                 if (r)
4347                         dev_err(adev->dev, "amdgpu asic init failed\n");
4348         }
4349
4350         r = amdgpu_device_ip_resume(adev);
4351
4352         if (r) {
4353                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4354                 goto exit;
4355         }
4356         amdgpu_fence_driver_hw_init(adev);
4357
4358         r = amdgpu_device_ip_late_init(adev);
4359         if (r)
4360                 goto exit;
4361
4362         queue_delayed_work(system_wq, &adev->delayed_init_work,
4363                            msecs_to_jiffies(AMDGPU_RESUME_MS));
4364
4365         if (!adev->in_s0ix) {
4366                 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4367                 if (r)
4368                         goto exit;
4369         }
4370
4371 exit:
4372         if (amdgpu_sriov_vf(adev)) {
4373                 amdgpu_virt_init_data_exchange(adev);
4374                 amdgpu_virt_release_full_gpu(adev, true);
4375         }
4376
4377         if (r)
4378                 return r;
4379
4380         /* Make sure IB tests flushed */
4381         flush_delayed_work(&adev->delayed_init_work);
4382
4383         if (fbcon)
4384                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4385
4386         amdgpu_ras_resume(adev);
4387
4388         if (adev->mode_info.num_crtc) {
4389                 /*
4390                  * Most of the connector probing functions try to acquire runtime pm
4391                  * refs to ensure that the GPU is powered on when connector polling is
4392                  * performed. Since we're calling this from a runtime PM callback,
4393                  * trying to acquire rpm refs will cause us to deadlock.
4394                  *
4395                  * Since we're guaranteed to be holding the rpm lock, it's safe to
4396                  * temporarily disable the rpm helpers so this doesn't deadlock us.
4397                  */
4398 #ifdef CONFIG_PM
4399                 dev->dev->power.disable_depth++;
4400 #endif
4401                 if (!adev->dc_enabled)
4402                         drm_helper_hpd_irq_event(dev);
4403                 else
4404                         drm_kms_helper_hotplug_event(dev);
4405 #ifdef CONFIG_PM
4406                 dev->dev->power.disable_depth--;
4407 #endif
4408         }
4409         adev->in_suspend = false;
4410
4411         if (adev->enable_mes)
4412                 amdgpu_mes_self_test(adev);
4413
4414         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4415                 DRM_WARN("smart shift update failed\n");
4416
4417         return 0;
4418 }
4419
4420 /**
4421  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4422  *
4423  * @adev: amdgpu_device pointer
4424  *
4425  * The list of all the hardware IPs that make up the asic is walked and
4426  * the check_soft_reset callbacks are run.  check_soft_reset determines
4427  * if the asic is still hung or not.
4428  * Returns true if any of the IPs are still in a hung state, false if not.
4429  */
4430 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4431 {
4432         int i;
4433         bool asic_hang = false;
4434
4435         if (amdgpu_sriov_vf(adev))
4436                 return true;
4437
4438         if (amdgpu_asic_need_full_reset(adev))
4439                 return true;
4440
4441         for (i = 0; i < adev->num_ip_blocks; i++) {
4442                 if (!adev->ip_blocks[i].status.valid)
4443                         continue;
4444                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4445                         adev->ip_blocks[i].status.hang =
4446                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4447                 if (adev->ip_blocks[i].status.hang) {
4448                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4449                         asic_hang = true;
4450                 }
4451         }
4452         return asic_hang;
4453 }
4454
4455 /**
4456  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4457  *
4458  * @adev: amdgpu_device pointer
4459  *
4460  * The list of all the hardware IPs that make up the asic is walked and the
4461  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4462  * handles any IP specific hardware or software state changes that are
4463  * necessary for a soft reset to succeed.
4464  * Returns 0 on success, negative error code on failure.
4465  */
4466 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4467 {
4468         int i, r = 0;
4469
4470         for (i = 0; i < adev->num_ip_blocks; i++) {
4471                 if (!adev->ip_blocks[i].status.valid)
4472                         continue;
4473                 if (adev->ip_blocks[i].status.hang &&
4474                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4475                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4476                         if (r)
4477                                 return r;
4478                 }
4479         }
4480
4481         return 0;
4482 }
4483
4484 /**
4485  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4486  *
4487  * @adev: amdgpu_device pointer
4488  *
4489  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4490  * reset is necessary to recover.
4491  * Returns true if a full asic reset is required, false if not.
4492  */
4493 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4494 {
4495         int i;
4496
4497         if (amdgpu_asic_need_full_reset(adev))
4498                 return true;
4499
4500         for (i = 0; i < adev->num_ip_blocks; i++) {
4501                 if (!adev->ip_blocks[i].status.valid)
4502                         continue;
4503                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4504                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4505                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4506                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4507                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4508                         if (adev->ip_blocks[i].status.hang) {
4509                                 dev_info(adev->dev, "Some blocks need a full reset!\n");
4510                                 return true;
4511                         }
4512                 }
4513         }
4514         return false;
4515 }
4516
4517 /**
4518  * amdgpu_device_ip_soft_reset - do a soft reset
4519  *
4520  * @adev: amdgpu_device pointer
4521  *
4522  * The list of all the hardware IPs that make up the asic is walked and the
4523  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4524  * IP specific hardware or software state changes that are necessary to soft
4525  * reset the IP.
4526  * Returns 0 on success, negative error code on failure.
4527  */
4528 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4529 {
4530         int i, r = 0;
4531
4532         for (i = 0; i < adev->num_ip_blocks; i++) {
4533                 if (!adev->ip_blocks[i].status.valid)
4534                         continue;
4535                 if (adev->ip_blocks[i].status.hang &&
4536                     adev->ip_blocks[i].version->funcs->soft_reset) {
4537                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4538                         if (r)
4539                                 return r;
4540                 }
4541         }
4542
4543         return 0;
4544 }
4545
4546 /**
4547  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4548  *
4549  * @adev: amdgpu_device pointer
4550  *
4551  * The list of all the hardware IPs that make up the asic is walked and the
4552  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4553  * handles any IP specific hardware or software state changes that are
4554  * necessary after the IP has been soft reset.
4555  * Returns 0 on success, negative error code on failure.
4556  */
4557 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4558 {
4559         int i, r = 0;
4560
4561         for (i = 0; i < adev->num_ip_blocks; i++) {
4562                 if (!adev->ip_blocks[i].status.valid)
4563                         continue;
4564                 if (adev->ip_blocks[i].status.hang &&
4565                     adev->ip_blocks[i].version->funcs->post_soft_reset)
4566                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4567                 if (r)
4568                         return r;
4569         }
4570
4571         return 0;
4572 }
4573
4574 /**
4575  * amdgpu_device_recover_vram - Recover some VRAM contents
4576  *
4577  * @adev: amdgpu_device pointer
4578  *
4579  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4580  * restore things like GPUVM page tables after a GPU reset where
4581  * the contents of VRAM might be lost.
4582  *
4583  * Returns:
4584  * 0 on success, negative error code on failure.
4585  */
4586 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4587 {
4588         struct dma_fence *fence = NULL, *next = NULL;
4589         struct amdgpu_bo *shadow;
4590         struct amdgpu_bo_vm *vmbo;
4591         long r = 1, tmo;
4592
4593         if (amdgpu_sriov_runtime(adev))
4594                 tmo = msecs_to_jiffies(8000);
4595         else
4596                 tmo = msecs_to_jiffies(100);
4597
4598         dev_info(adev->dev, "recover vram bo from shadow start\n");
4599         mutex_lock(&adev->shadow_list_lock);
4600         list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4601                 /* If the vm is a compute context or adev is an APU, the shadow will be NULL */
4602                 if (!vmbo->shadow)
4603                         continue;
4604                 shadow = vmbo->shadow;
4605
4606                 /* No need to recover an evicted BO */
4607                 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4608                     shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4609                     shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4610                         continue;
4611
4612                 r = amdgpu_bo_restore_shadow(shadow, &next);
4613                 if (r)
4614                         break;
4615
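                     /* wait for the previously issued restore while the latest one runs */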
4616                 if (fence) {
4617                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4618                         dma_fence_put(fence);
4619                         fence = next;
4620                         if (tmo == 0) {
4621                                 r = -ETIMEDOUT;
4622                                 break;
4623                         } else if (tmo < 0) {
4624                                 r = tmo;
4625                                 break;
4626                         }
4627                 } else {
4628                         fence = next;
4629                 }
4630         }
4631         mutex_unlock(&adev->shadow_list_lock);
4632
4633         if (fence)
4634                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4635         dma_fence_put(fence);
4636
4637         if (r < 0 || tmo <= 0) {
4638                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4639                 return -EIO;
4640         }
4641
4642         dev_info(adev->dev, "recover vram bo from shadow done\n");
4643         return 0;
4644 }
4645
4646
4647 /**
4648  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4649  *
4650  * @adev: amdgpu_device pointer
4651  * @from_hypervisor: request from hypervisor
4652  *
4653  * Do a VF FLR and reinitialize the ASIC.
4654  * Returns 0 on success, negative error code on failure.
4655  */
4656 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4657                                      bool from_hypervisor)
4658 {
4659         int r;
4660         struct amdgpu_hive_info *hive = NULL;
4661         int retry_limit = 0;
4662
4663 retry:
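             /* quiesce KFD before asking the host for the VF FLR */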
4664         amdgpu_amdkfd_pre_reset(adev);
4665
4666         if (from_hypervisor)
4667                 r = amdgpu_virt_request_full_gpu(adev, true);
4668         else
4669                 r = amdgpu_virt_reset_gpu(adev);
4670         if (r)
4671                 return r;
4672         amdgpu_irq_gpu_reset_resume_helper(adev);
4673
4674         /* some SW cleanup the VF needs to do before recovery */
4675         amdgpu_virt_post_reset(adev);
4676
4677         /* Resume IP prior to SMC */
4678         r = amdgpu_device_ip_reinit_early_sriov(adev);
4679         if (r)
4680                 goto error;
4681
4682         amdgpu_virt_init_data_exchange(adev);
4683
4684         r = amdgpu_device_fw_loading(adev);
4685         if (r)
4686                 return r;
4687
4688         /* now we are okay to resume SMC/CP/SDMA */
4689         r = amdgpu_device_ip_reinit_late_sriov(adev);
4690         if (r)
4691                 goto error;
4692
4693         hive = amdgpu_get_xgmi_hive(adev);
4694         /* Update PSP FW topology after reset */
4695         if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4696                 r = amdgpu_xgmi_update_topology(hive, adev);
4697
4698         if (hive)
4699                 amdgpu_put_xgmi_hive(hive);
4700
4701         if (!r) {
4702                 r = amdgpu_ib_ring_tests(adev);
4703
4704                 amdgpu_amdkfd_post_reset(adev);
4705         }
4706
4707 error:
4708         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4709                 amdgpu_inc_vram_lost(adev);
4710                 r = amdgpu_device_recover_vram(adev);
4711         }
4712         amdgpu_virt_release_full_gpu(adev, true);
4713
4714         if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4715                 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4716                         retry_limit++;
4717                         goto retry;
4718                 } else
4719                         DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4720         }
4721
4722         return r;
4723 }
4724
4725 /**
4726  * amdgpu_device_has_job_running - check if there is any job in the pending list
4727  *
4728  * @adev: amdgpu_device pointer
4729  *
4730  * check if there is any job in the pending list
4731  */
4732 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4733 {
4734         int i;
4735         struct drm_sched_job *job;
4736
4737         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4738                 struct amdgpu_ring *ring = adev->rings[i];
4739
4740                 if (!ring || !ring->sched.thread)
4741                         continue;
4742
4743                 spin_lock(&ring->sched.job_list_lock);
4744                 job = list_first_entry_or_null(&ring->sched.pending_list,
4745                                                struct drm_sched_job, list);
4746                 spin_unlock(&ring->sched.job_list_lock);
4747                 if (job)
4748                         return true;
4749         }
4750         return false;
4751 }
4752
4753 /**
4754  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4755  *
4756  * @adev: amdgpu_device pointer
4757  *
4758  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4759  * a hung GPU.
4760  */
4761 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4762 {
4763
4764         if (amdgpu_gpu_recovery == 0)
4765                 goto disabled;
4766
4767         /* Skip soft reset check in fatal error mode */
4768         if (!amdgpu_ras_is_poison_mode_supported(adev))
4769                 return true;
4770
4771         if (amdgpu_sriov_vf(adev))
4772                 return true;
4773
4774         if (amdgpu_gpu_recovery == -1) {
4775                 switch (adev->asic_type) {
4776 #ifdef CONFIG_DRM_AMDGPU_SI
4777                 case CHIP_VERDE:
4778                 case CHIP_TAHITI:
4779                 case CHIP_PITCAIRN:
4780                 case CHIP_OLAND:
4781                 case CHIP_HAINAN:
4782 #endif
4783 #ifdef CONFIG_DRM_AMDGPU_CIK
4784                 case CHIP_KAVERI:
4785                 case CHIP_KABINI:
4786                 case CHIP_MULLINS:
4787 #endif
4788                 case CHIP_CARRIZO:
4789                 case CHIP_STONEY:
4790                 case CHIP_CYAN_SKILLFISH:
4791                         goto disabled;
4792                 default:
4793                         break;
4794                 }
4795         }
4796
4797         return true;
4798
4799 disabled:
4800                 dev_info(adev->dev, "GPU recovery disabled.\n");
4801                 return false;
4802 }
4803
4804 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4805 {
4806         u32 i;
4807         int ret = 0;
4808
4809         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4810
4811         dev_info(adev->dev, "GPU mode1 reset\n");
4812
4813         /* disable BM */
4814         pci_clear_master(adev->pdev);
4815
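             /* cache the PCI config state so it can be restored after the reset */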
4816         amdgpu_device_cache_pci_state(adev->pdev);
4817
4818         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4819                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4820                 ret = amdgpu_dpm_mode1_reset(adev);
4821         } else {
4822                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4823                 ret = psp_gpu_reset(adev);
4824         }
4825
4826         if (ret)
4827                 goto mode1_reset_failed;
4828
4829         amdgpu_device_load_pci_state(adev->pdev);
4830         ret = amdgpu_psp_wait_for_bootloader(adev);
4831         if (ret)
4832                 goto mode1_reset_failed;
4833
4834         /* wait for asic to come out of reset */
4835         for (i = 0; i < adev->usec_timeout; i++) {
4836                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4837
4838                 if (memsize != 0xffffffff)
4839                         break;
4840                 udelay(1);
4841         }
4842
4843         if (i >= adev->usec_timeout) {
4844                 ret = -ETIMEDOUT;
4845                 goto mode1_reset_failed;
4846         }
4847
4848         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4849
4850         return 0;
4851
4852 mode1_reset_failed:
4853         dev_err(adev->dev, "GPU mode1 reset failed\n");
4854         return ret;
4855 }
4856
4857 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4858                                  struct amdgpu_reset_context *reset_context)
4859 {
4860         int i, r = 0;
4861         struct amdgpu_job *job = NULL;
4862         bool need_full_reset =
4863                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4864
4865         if (reset_context->reset_req_dev == adev)
4866                 job = reset_context->job;
4867
4868         if (amdgpu_sriov_vf(adev)) {
4869                 /* stop the data exchange thread */
4870                 amdgpu_virt_fini_data_exchange(adev);
4871         }
4872
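             /* stop processing fence interrupts while the job fences are cleared below */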
4873         amdgpu_fence_driver_isr_toggle(adev, true);
4874
4875         /* block all schedulers and reset given job's ring */
4876         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4877                 struct amdgpu_ring *ring = adev->rings[i];
4878
4879                 if (!ring || !ring->sched.thread)
4880                         continue;
4881
4882                 /* Clear the job fences from the fence driver so that
4883                  * force_completion does not leave NULL and vm flush fences behind
4884                  */
4885                 amdgpu_fence_driver_clear_job_fences(ring);
4886
4887                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4888                 amdgpu_fence_driver_force_completion(ring);
4889         }
4890
4891         amdgpu_fence_driver_isr_toggle(adev, false);
4892
4893         if (job && job->vm)
4894                 drm_sched_increase_karma(&job->base);
4895
4896         r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4897         /* If reset handler not implemented, continue; otherwise return */
4898         if (r == -EOPNOTSUPP)
4899                 r = 0;
4900         else
4901                 return r;
4902
4903         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4904         if (!amdgpu_sriov_vf(adev)) {
4905
4906                 if (!need_full_reset)
4907                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4908
4909                 if (!need_full_reset && amdgpu_gpu_recovery &&
4910                     amdgpu_device_ip_check_soft_reset(adev)) {
4911                         amdgpu_device_ip_pre_soft_reset(adev);
4912                         r = amdgpu_device_ip_soft_reset(adev);
4913                         amdgpu_device_ip_post_soft_reset(adev);
4914                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4915                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4916                                 need_full_reset = true;
4917                         }
4918                 }
4919
4920                 if (need_full_reset)
4921                         r = amdgpu_device_ip_suspend(adev);
4922                 if (need_full_reset)
4923                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4924                 else
4925                         clear_bit(AMDGPU_NEED_FULL_RESET,
4926                                   &reset_context->flags);
4927         }
4928
4929         return r;
4930 }
4931
4932 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4933 {
4934         int i;
4935
4936         lockdep_assert_held(&adev->reset_domain->sem);
4937
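             /* snapshot the configured reset dump registers so they appear in the devcoredump */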
4938         for (i = 0; i < adev->num_regs; i++) {
4939                 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4940                 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4941                                              adev->reset_dump_reg_value[i]);
4942         }
4943
4944         return 0;
4945 }
4946
4947 #ifdef CONFIG_DEV_COREDUMP
4948 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4949                 size_t count, void *data, size_t datalen)
4950 {
4951         struct drm_printer p;
4952         struct amdgpu_device *adev = data;
4953         struct drm_print_iterator iter;
4954         int i;
4955
4956         iter.data = buffer;
4957         iter.offset = 0;
4958         iter.start = offset;
4959         iter.remain = count;
4960
4961         p = drm_coredump_printer(&iter);
4962
4963         drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4964         drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4965         drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4966         drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4967         if (adev->reset_task_info.pid)
4968                 drm_printf(&p, "process_name: %s PID: %d\n",
4969                            adev->reset_task_info.process_name,
4970                            adev->reset_task_info.pid);
4971
4972         if (adev->reset_vram_lost)
4973                 drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4974         if (adev->num_regs) {
4975                 drm_printf(&p, "AMDGPU register dumps:\nOffset:     Value:\n");
4976
4977                 for (i = 0; i < adev->num_regs; i++)
4978                         drm_printf(&p, "0x%08x: 0x%08x\n",
4979                                    adev->reset_dump_reg_list[i],
4980                                    adev->reset_dump_reg_value[i]);
4981         }
4982
4983         return count - iter.remain;
4984 }
4985
4986 static void amdgpu_devcoredump_free(void *data)
4987 {
4988 }
4989
4990 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4991 {
4992         struct drm_device *dev = adev_to_drm(adev);
4993
4994         ktime_get_ts64(&adev->reset_time);
4995         dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_NOWAIT,
4996                       amdgpu_devcoredump_read, amdgpu_devcoredump_free);
4997 }
4998 #endif
4999
5000 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
5001                          struct amdgpu_reset_context *reset_context)
5002 {
5003         struct amdgpu_device *tmp_adev = NULL;
5004         bool need_full_reset, skip_hw_reset, vram_lost = false;
5005         int r = 0;
5006         bool gpu_reset_for_dev_remove = false;
5007
5008         /* Try reset handler method first */
5009         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5010                                     reset_list);
5011         amdgpu_reset_reg_dumps(tmp_adev);
5012
5013         reset_context->reset_device_list = device_list_handle;
5014         r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
5015         /* If reset handler not implemented, continue; otherwise return */
5016         if (r == -EOPNOTSUPP)
5017                 r = 0;
5018         else
5019                 return r;
5020
5021         /* Reset handler not implemented, use the default method */
5022         need_full_reset =
5023                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5024         skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
5025
5026         gpu_reset_for_dev_remove =
5027                 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5028                         test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5029
5030         /*
5031          * ASIC reset has to be done on all XGMI hive nodes ASAP
5032          * to allow proper links negotiation in FW (within 1 sec)
5033          */
5034         if (!skip_hw_reset && need_full_reset) {
5035                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5036                         /* For XGMI run all resets in parallel to speed up the process */
5037                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5038                                 tmp_adev->gmc.xgmi.pending_reset = false;
5039                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
5040                                         r = -EALREADY;
5041                         } else
5042                                 r = amdgpu_asic_reset(tmp_adev);
5043
5044                         if (r) {
5045                                 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
5046                                          r, adev_to_drm(tmp_adev)->unique);
5047                                 break;
5048                         }
5049                 }
5050
5051                 /* For XGMI wait for all resets to complete before proceed */
5052                 if (!r) {
5053                         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5054                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5055                                         flush_work(&tmp_adev->xgmi_reset_work);
5056                                         r = tmp_adev->asic_reset_res;
5057                                         if (r)
5058                                                 break;
5059                                 }
5060                         }
5061                 }
5062         }
5063
5064         if (!r && amdgpu_ras_intr_triggered()) {
5065                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5066                         if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
5067                             tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
5068                                 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
5069                 }
5070
5071                 amdgpu_ras_intr_cleared();
5072         }
5073
5074         /* Since the mode1 reset affects base ip blocks, the
5075          * phase1 ip blocks need to be resumed. Otherwise there
5076          * will be a BIOS signature error and the psp bootloader
5077          * can't load kdb on the next amdgpu install.
5078          */
5079         if (gpu_reset_for_dev_remove) {
5080                 list_for_each_entry(tmp_adev, device_list_handle, reset_list)
5081                         amdgpu_device_ip_resume_phase1(tmp_adev);
5082
5083                 goto end;
5084         }
5085
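             /* re-init each device: on a full reset, re-post the card, resume the
              * IP blocks and reload firmware, then run IB tests and recover VRAM
              */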
5086         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5087                 if (need_full_reset) {
5088                         /* post card */
5089                         r = amdgpu_device_asic_init(tmp_adev);
5090                         if (r) {
5091                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
5092                         } else {
5093                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
5094
5095                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
5096                                 if (r)
5097                                         goto out;
5098
5099                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
5100 #ifdef CONFIG_DEV_COREDUMP
5101                                 tmp_adev->reset_vram_lost = vram_lost;
5102                                 memset(&tmp_adev->reset_task_info, 0,
5103                                                 sizeof(tmp_adev->reset_task_info));
5104                                 if (reset_context->job && reset_context->job->vm)
5105                                         tmp_adev->reset_task_info =
5106                                                 reset_context->job->vm->task_info;
5107                                 amdgpu_reset_capture_coredumpm(tmp_adev);
5108 #endif
5109                                 if (vram_lost) {
5110                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
5111                                         amdgpu_inc_vram_lost(tmp_adev);
5112                                 }
5113
5114                                 r = amdgpu_device_fw_loading(tmp_adev);
5115                                 if (r)
5116                                         return r;
5117
5118                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
5119                                 if (r)
5120                                         goto out;
5121
5122                                 if (vram_lost)
5123                                         amdgpu_device_fill_reset_magic(tmp_adev);
5124
5125                                 /*
5126                                  * Add this ASIC back as tracked since the reset
5127                                  * has already completed successfully.
5128                                  */
5129                                 amdgpu_register_gpu_instance(tmp_adev);
5130
5131                                 if (!reset_context->hive &&
5132                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5133                                         amdgpu_xgmi_add_device(tmp_adev);
5134
5135                                 r = amdgpu_device_ip_late_init(tmp_adev);
5136                                 if (r)
5137                                         goto out;
5138
5139                                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
5140
5141                                 /*
5142                                  * The GPU enters a bad state once the number of
5143                                  * faulty pages reported by ECC reaches the
5144                                  * threshold, and RAS recovery is scheduled next.
5145                                  * So check here to break recovery if the bad page
5146                                  * threshold has indeed been exceeded, and remind
5147                                  * the user to retire this GPU or set a bigger
5148                                  * bad_page_threshold value to work around it the
5149                                  * next time the driver is probed.
5150                                  */
5151                                 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
5152                                         /* must succeed. */
5153                                         amdgpu_ras_resume(tmp_adev);
5154                                 } else {
5155                                         r = -EINVAL;
5156                                         goto out;
5157                                 }
5158
5159                                 /* Update PSP FW topology after reset */
5160                                 if (reset_context->hive &&
5161                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5162                                         r = amdgpu_xgmi_update_topology(
5163                                                 reset_context->hive, tmp_adev);
5164                         }
5165                 }
5166
5167 out:
5168                 if (!r) {
5169                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5170                         r = amdgpu_ib_ring_tests(tmp_adev);
5171                         if (r) {
5172                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5173                                 need_full_reset = true;
5174                                 r = -EAGAIN;
5175                                 goto end;
5176                         }
5177                 }
5178
5179                 if (!r)
5180                         r = amdgpu_device_recover_vram(tmp_adev);
5181                 else
5182                         tmp_adev->asic_reset_res = r;
5183         }
5184
5185 end:
5186         if (need_full_reset)
5187                 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5188         else
5189                 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5190         return r;
5191 }
5192
5193 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5194 {
5195
5196         switch (amdgpu_asic_reset_method(adev)) {
5197         case AMD_RESET_METHOD_MODE1:
5198                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5199                 break;
5200         case AMD_RESET_METHOD_MODE2:
5201                 adev->mp1_state = PP_MP1_STATE_RESET;
5202                 break;
5203         default:
5204                 adev->mp1_state = PP_MP1_STATE_NONE;
5205                 break;
5206         }
5207 }
5208
5209 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5210 {
5211         amdgpu_vf_error_trans_all(adev);
5212         adev->mp1_state = PP_MP1_STATE_NONE;
5213 }
5214
5215 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5216 {
5217         struct pci_dev *p = NULL;
5218
5219         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5220                         adev->pdev->bus->number, 1);
5221         if (p) {
5222                 pm_runtime_enable(&(p->dev));
5223                 pm_runtime_resume(&(p->dev));
5224         }
5225
5226         pci_dev_put(p);
5227 }
5228
5229 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5230 {
5231         enum amd_reset_method reset_method;
5232         struct pci_dev *p = NULL;
5233         u64 expires;
5234
5235         /*
5236          * For now, only BACO and mode1 reset are confirmed
5237          * to suffer from the audio issue if not properly suspended.
5238          */
5239         reset_method = amdgpu_asic_reset_method(adev);
5240         if ((reset_method != AMD_RESET_METHOD_BACO) &&
5241              (reset_method != AMD_RESET_METHOD_MODE1))
5242                 return -EINVAL;
5243
5244         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5245                         adev->pdev->bus->number, 1);
5246         if (!p)
5247                 return -ENODEV;
5248
5249         expires = pm_runtime_autosuspend_expiration(&(p->dev));
5250         if (!expires)
5251                 /*
5252                  * If we cannot get the audio device autosuspend delay,
5253                  * a fixed 4S interval will be used. Since 3S is the audio
5254                  * controller's default autosuspend delay setting, the 4S
5255                  * used here is guaranteed to cover it.
5256                  */
5257                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5258
5259         while (!pm_runtime_status_suspended(&(p->dev))) {
5260                 if (!pm_runtime_suspend(&(p->dev)))
5261                         break;
5262
5263                 if (expires < ktime_get_mono_fast_ns()) {
5264                         dev_warn(adev->dev, "failed to suspend display audio\n");
5265                         pci_dev_put(p);
5266                         /* TODO: abort the succeeding gpu reset? */
5267                         return -ETIMEDOUT;
5268                 }
5269         }
5270
5271         pm_runtime_disable(&(p->dev));
5272
5273         pci_dev_put(p);
5274         return 0;
5275 }
5276
5277 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5278 {
5279         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5280
5281 #if defined(CONFIG_DEBUG_FS)
5282         if (!amdgpu_sriov_vf(adev))
5283                 cancel_work(&adev->reset_work);
5284 #endif
5285
5286         if (adev->kfd.dev)
5287                 cancel_work(&adev->kfd.reset_work);
5288
5289         if (amdgpu_sriov_vf(adev))
5290                 cancel_work(&adev->virt.flr_work);
5291
5292         if (con && adev->ras_enabled)
5293                 cancel_work(&con->recovery_work);
5294
5295 }
5296
5297 /**
5298  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5299  *
5300  * @adev: amdgpu_device pointer
5301  * @job: which job triggered the hang
5302  * @reset_context: amdgpu reset context pointer
5303  *
5304  * Attempt to reset the GPU if it has hung (all asics).
5305  * Attempt to do a soft or full reset and reinitialize the ASIC.
5306  * Returns 0 for success or an error on failure.
5307  */
5308
5309 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5310                               struct amdgpu_job *job,
5311                               struct amdgpu_reset_context *reset_context)
5312 {
5313         struct list_head device_list, *device_list_handle =  NULL;
5314         bool job_signaled = false;
5315         struct amdgpu_hive_info *hive = NULL;
5316         struct amdgpu_device *tmp_adev = NULL;
5317         int i, r = 0;
5318         bool need_emergency_restart = false;
5319         bool audio_suspended = false;
5320         bool gpu_reset_for_dev_remove = false;
5321
5322         gpu_reset_for_dev_remove =
5323                         test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5324                                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5325
5326         /*
5327          * Special case: RAS triggered and full reset isn't supported
5328          */
5329         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5330
5331         /*
5332          * Flush RAM to disk so that after reboot
5333          * the user can read the log and see why the system rebooted.
5334          */
5335         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5336                 DRM_WARN("Emergency reboot.");
5337
5338                 ksys_sync_helper();
5339                 emergency_restart();
5340         }
5341
5342         dev_info(adev->dev, "GPU %s begin!\n",
5343                 need_emergency_restart ? "jobs stop":"reset");
5344
5345         if (!amdgpu_sriov_vf(adev))
5346                 hive = amdgpu_get_xgmi_hive(adev);
5347         if (hive)
5348                 mutex_lock(&hive->hive_lock);
5349
5350         reset_context->job = job;
5351         reset_context->hive = hive;
5352         /*
5353          * Build list of devices to reset.
5354          * In case we are in XGMI hive mode, re-sort the device list
5355          * to put adev in the 1st position.
5356          */
5357         INIT_LIST_HEAD(&device_list);
5358         if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5359                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5360                         list_add_tail(&tmp_adev->reset_list, &device_list);
5361                         if (gpu_reset_for_dev_remove && adev->shutdown)
5362                                 tmp_adev->shutdown = true;
5363                 }
5364                 if (!list_is_first(&adev->reset_list, &device_list))
5365                         list_rotate_to_front(&adev->reset_list, &device_list);
5366                 device_list_handle = &device_list;
5367         } else {
5368                 list_add_tail(&adev->reset_list, &device_list);
5369                 device_list_handle = &device_list;
5370         }
5371
5372         /* We need to lock reset domain only once both for XGMI and single device */
5373         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5374                                     reset_list);
5375         amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5376
5377         /* block all schedulers and reset given job's ring */
5378         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5379
5380                 amdgpu_device_set_mp1_state(tmp_adev);
5381
5382                 /*
5383                  * Try to put the audio codec into the suspend state
5384                  * before the gpu reset starts.
5385                  *
5386                  * The power domain of the graphics device is shared
5387                  * with the AZ power domain. Without this, we may
5388                  * change the audio hardware from behind the audio
5389                  * driver's back, which will trigger some audio
5390                  * codec errors.
5391                  */
5392                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5393                         audio_suspended = true;
5394
5395                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5396
5397                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5398
5399                 if (!amdgpu_sriov_vf(tmp_adev))
5400                         amdgpu_amdkfd_pre_reset(tmp_adev);
5401
5402                 /*
5403                  * Mark the ASICs to be reset as untracked first,
5404                  * and add them back after the reset has completed.
5405                  */
5406                 amdgpu_unregister_gpu_instance(tmp_adev);
5407
5408                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5409
5410                 /* disable ras on ALL IPs */
5411                 if (!need_emergency_restart &&
5412                       amdgpu_device_ip_need_full_reset(tmp_adev))
5413                         amdgpu_ras_suspend(tmp_adev);
5414
5415                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5416                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5417
5418                         if (!ring || !ring->sched.thread)
5419                                 continue;
5420
5421                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5422
5423                         if (need_emergency_restart)
5424                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5425                 }
5426                 atomic_inc(&tmp_adev->gpu_reset_counter);
5427         }
5428
5429         if (need_emergency_restart)
5430                 goto skip_sched_resume;
5431
5432         /*
5433          * Must check guilty signal here since after this point all old
5434          * HW fences are force signaled.
5435          *
5436          * job->base holds a reference to parent fence
5437          */
5438         if (job && dma_fence_is_signaled(&job->hw_fence)) {
5439                 job_signaled = true;
5440                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5441                 goto skip_hw_reset;
5442         }
5443
5444 retry:  /* Rest of adevs pre asic reset from XGMI hive. */
5445         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5446                 if (gpu_reset_for_dev_remove) {
5447                         /* Workaround for ASICs that need to disable the SMC first */
5448                         amdgpu_device_smu_fini_early(tmp_adev);
5449                 }
5450                 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5451                 /* TODO: Should we stop? */
5452                 if (r) {
5453                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5454                                   r, adev_to_drm(tmp_adev)->unique);
5455                         tmp_adev->asic_reset_res = r;
5456                 }
5457
5458                 /*
5459                  * Drop all pending non-scheduler resets. Scheduler resets
5460                  * were already dropped during drm_sched_stop
5461                  */
5462                 amdgpu_device_stop_pending_resets(tmp_adev);
5463         }
5464
5465         /* Actual ASIC resets if needed. */
5466         /* Host driver will handle XGMI hive reset for SRIOV */
5467         if (amdgpu_sriov_vf(adev)) {
5468                 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5469                 if (r)
5470                         adev->asic_reset_res = r;
5471
5472                 /* Aldebaran and gfx_11_0_3 support RAS in SRIOV, so RAS needs to be resumed during reset */
5473                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
5474                     adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3))
5475                         amdgpu_ras_resume(adev);
5476         } else {
5477                 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5478                 if (r && r == -EAGAIN)
5479                         goto retry;
5480
5481                 if (!r && gpu_reset_for_dev_remove)
5482                         goto recover_end;
5483         }
5484
5485 skip_hw_reset:
5486
5487         /* Post ASIC reset for all devs. */
5488         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5489
5490                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5491                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5492
5493                         if (!ring || !ring->sched.thread)
5494                                 continue;
5495
5496                         drm_sched_start(&ring->sched, true);
5497                 }
5498
5499                 if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
5500                         amdgpu_mes_self_test(tmp_adev);
5501
5502                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
5503                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5504
5505                 if (tmp_adev->asic_reset_res)
5506                         r = tmp_adev->asic_reset_res;
5507
5508                 tmp_adev->asic_reset_res = 0;
5509
5510                 if (r) {
5511                         /* bad news, how to tell it to userspace? */
5512                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5513                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5514                 } else {
5515                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5516                         if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5517                                 DRM_WARN("smart shift update failed\n");
5518                 }
5519         }
5520
5521 skip_sched_resume:
5522         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5523                 /* unlock kfd: SRIOV would do it separately */
5524                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5525                         amdgpu_amdkfd_post_reset(tmp_adev);
5526
5527                 /* kfd_post_reset will do nothing if kfd device is not initialized,
5528                  * so bring up kfd here if it was not initialized before
5529                  */
5530                 if (!adev->kfd.init_complete)
5531                         amdgpu_amdkfd_device_init(adev);
5532
5533                 if (audio_suspended)
5534                         amdgpu_device_resume_display_audio(tmp_adev);
5535
5536                 amdgpu_device_unset_mp1_state(tmp_adev);
5537
5538                 amdgpu_ras_set_error_query_ready(tmp_adev, true);
5539         }
5540
5541 recover_end:
5542         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5543                                             reset_list);
5544         amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5545
5546         if (hive) {
5547                 mutex_unlock(&hive->hive_lock);
5548                 amdgpu_put_xgmi_hive(hive);
5549         }
5550
5551         if (r)
5552                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5553
5554         atomic_set(&adev->reset_domain->reset_res, r);
5555         return r;
5556 }
5557
5558 /**
5559  * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5560  *
5561  * @adev: amdgpu_device pointer
5562  *
5563  * Fetches and stores in the driver the PCIE capabilities (gen speed
5564  * and lanes) of the slot the device is in. Handles APUs and
5565  * virtualized environments where PCIE config space may not be available.
5566  */
5567 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5568 {
5569         struct pci_dev *pdev;
5570         enum pci_bus_speed speed_cap, platform_speed_cap;
5571         enum pcie_link_width platform_link_width;
5572
5573         if (amdgpu_pcie_gen_cap)
5574                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5575
5576         if (amdgpu_pcie_lane_cap)
5577                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5578
5579         /* covers APUs as well */
5580         if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
5581                 if (adev->pm.pcie_gen_mask == 0)
5582                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5583                 if (adev->pm.pcie_mlw_mask == 0)
5584                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5585                 return;
5586         }
5587
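             /* both caps already provided (e.g. via module parameters), nothing left to probe */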
5588         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5589                 return;
5590
5591         pcie_bandwidth_available(adev->pdev, NULL,
5592                                  &platform_speed_cap, &platform_link_width);
5593
5594         if (adev->pm.pcie_gen_mask == 0) {
5595                 /* asic caps */
5596                 pdev = adev->pdev;
5597                 speed_cap = pcie_get_speed_cap(pdev);
5598                 if (speed_cap == PCI_SPEED_UNKNOWN) {
5599                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5600                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5601                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5602                 } else {
5603                         if (speed_cap == PCIE_SPEED_32_0GT)
5604                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5605                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5606                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5607                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5608                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5609                         else if (speed_cap == PCIE_SPEED_16_0GT)
5610                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5611                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5612                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5613                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5614                         else if (speed_cap == PCIE_SPEED_8_0GT)
5615                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5616                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5617                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5618                         else if (speed_cap == PCIE_SPEED_5_0GT)
5619                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5620                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5621                         else
5622                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5623                 }
5624                 /* platform caps */
5625                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5626                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5627                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5628                 } else {
5629                         if (platform_speed_cap == PCIE_SPEED_32_0GT)
5630                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5631                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5632                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5633                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5634                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5635                         else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5636                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5637                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5638                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5639                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5640                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5641                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5642                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5643                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5644                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5645                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5646                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5647                         else
5648                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5649
5650                 }
5651         }
5652         if (adev->pm.pcie_mlw_mask == 0) {
5653                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5654                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5655                 } else {
5656                         switch (platform_link_width) {
5657                         case PCIE_LNK_X32:
5658                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5659                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5660                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5661                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5662                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5663                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5664                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5665                                 break;
5666                         case PCIE_LNK_X16:
5667                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5668                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5669                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5670                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5671                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5672                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5673                                 break;
5674                         case PCIE_LNK_X12:
5675                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5676                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5677                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5678                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5679                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5680                                 break;
5681                         case PCIE_LNK_X8:
5682                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5683                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5684                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5685                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5686                                 break;
5687                         case PCIE_LNK_X4:
5688                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5689                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5690                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5691                                 break;
5692                         case PCIE_LNK_X2:
5693                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5694                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5695                                 break;
5696                         case PCIE_LNK_X1:
5697                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5698                                 break;
5699                         default:
5700                                 break;
5701                         }
5702                 }
5703         }
5704 }
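
/*
 * Editor's note: illustrative sketch only, not part of this file. A
 * hypothetical helper showing how a caller could reduce the gen mask
 * built above to a single "highest supported generation" value; the
 * helper name is an assumption made for illustration.
 */
static int amdgpu_example_max_pcie_gen(struct amdgpu_device *adev)
{
        u32 mask = adev->pm.pcie_gen_mask;

        /* Highest platform gen bit present in the combined mask wins. */
        if (mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
                return 5;
        if (mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
                return 4;
        if (mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
                return 3;
        if (mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
                return 2;
        return 1;
}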
5705
5706 /**
5707  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5708  *
5709  * @adev: amdgpu_device pointer
5710  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5711  *
5712  * Return true if @peer_adev can access (DMA) @adev through the PCIe
5713  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5714  * @peer_adev.
5715  */
5716 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5717                                       struct amdgpu_device *peer_adev)
5718 {
5719 #ifdef CONFIG_HSA_AMD_P2P
5720         uint64_t address_mask = peer_adev->dev->dma_mask ?
5721                 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5722         resource_size_t aper_limit =
5723                 adev->gmc.aper_base + adev->gmc.aper_size - 1;
5724         bool p2p_access =
5725                 !adev->gmc.xgmi.connected_to_cpu &&
5726                 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5727
5728         return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5729                 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5730                 !(adev->gmc.aper_base & address_mask ||
5731                   aper_limit & address_mask));
5732 #else
5733         return false;
5734 #endif
5735 }
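
/*
 * Editor's note: illustrative sketch only, not part of this file. A
 * hypothetical caller that checks peer accessibility in both directions
 * before treating two devices as DMA peers; the function name is an
 * assumption made for illustration.
 */
static bool amdgpu_example_pair_is_p2p_capable(struct amdgpu_device *a,
                                               struct amdgpu_device *b)
{
        /* A P2P mapping is only useful if each side can reach the other. */
        return amdgpu_device_is_peer_accessible(a, b) &&
               amdgpu_device_is_peer_accessible(b, a);
}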
5736
5737 int amdgpu_device_baco_enter(struct drm_device *dev)
5738 {
5739         struct amdgpu_device *adev = drm_to_adev(dev);
5740         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5741
5742         if (!amdgpu_device_supports_baco(dev))
5743                 return -ENOTSUPP;
5744
5745         if (ras && adev->ras_enabled &&
5746             adev->nbio.funcs->enable_doorbell_interrupt)
5747                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5748
5749         return amdgpu_dpm_baco_enter(adev);
5750 }
5751
5752 int amdgpu_device_baco_exit(struct drm_device *dev)
5753 {
5754         struct amdgpu_device *adev = drm_to_adev(dev);
5755         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5756         int ret = 0;
5757
5758         if (!amdgpu_device_supports_baco(dev))
5759                 return -ENOTSUPP;
5760
5761         ret = amdgpu_dpm_baco_exit(adev);
5762         if (ret)
5763                 return ret;
5764
5765         if (ras && adev->ras_enabled &&
5766             adev->nbio.funcs->enable_doorbell_interrupt)
5767                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5768
5769         if (amdgpu_passthrough(adev) &&
5770             adev->nbio.funcs->clear_doorbell_interrupt)
5771                 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5772
5773         return 0;
5774 }
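
/*
 * Editor's note: illustrative sketch only, not part of this file. A
 * hypothetical helper showing the expected pairing of BACO entry and
 * exit around a low-power window; the helper name is an assumption.
 */
static int amdgpu_example_baco_cycle(struct drm_device *dev)
{
        int r;

        r = amdgpu_device_baco_enter(dev);
        if (r)
                return r;       /* e.g. -ENOTSUPP when BACO is unavailable */

        /* ... the device sits in BACO until something needs it again ... */

        return amdgpu_device_baco_exit(dev);
}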
5775
5776 /**
5777  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5778  * @pdev: PCI device struct
5779  * @state: PCI channel state
5780  *
5781  * Description: Called when a PCI error is detected.
5782  *
5783  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5784  */
5785 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5786 {
5787         struct drm_device *dev = pci_get_drvdata(pdev);
5788         struct amdgpu_device *adev = drm_to_adev(dev);
5789         int i;
5790
5791         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5792
5793         if (adev->gmc.xgmi.num_physical_nodes > 1) {
5794                 DRM_WARN("No support for XGMI hive yet...");
5795                 return PCI_ERS_RESULT_DISCONNECT;
5796         }
5797
5798         adev->pci_channel_state = state;
5799
5800         switch (state) {
5801         case pci_channel_io_normal:
5802                 return PCI_ERS_RESULT_CAN_RECOVER;
5803         /* Fatal error, prepare for slot reset */
5804         case pci_channel_io_frozen:
5805                 /*
5806                  * Locking adev->reset_domain->sem will prevent any external access
5807                  * to GPU during PCI error recovery
5808                  */
5809                 amdgpu_device_lock_reset_domain(adev->reset_domain);
5810                 amdgpu_device_set_mp1_state(adev);
5811
5812                 /*
5813                  * Block any work scheduling as we do for regular GPU reset
5814                  * for the duration of the recovery
5815                  */
5816                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5817                         struct amdgpu_ring *ring = adev->rings[i];
5818
5819                         if (!ring || !ring->sched.thread)
5820                                 continue;
5821
5822                         drm_sched_stop(&ring->sched, NULL);
5823                 }
5824                 atomic_inc(&adev->gpu_reset_counter);
5825                 return PCI_ERS_RESULT_NEED_RESET;
5826         case pci_channel_io_perm_failure:
5827                 /* Permanent error, prepare for device removal */
5828                 return PCI_ERS_RESULT_DISCONNECT;
5829         }
5830
5831         return PCI_ERS_RESULT_NEED_RESET;
5832 }
5833
5834 /**
5835  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5836  * @pdev: pointer to PCI device
5837  */
5838 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5839 {
5840
5841         DRM_INFO("PCI error: mmio enabled callback!!\n");
5842
5843         /* TODO - dump whatever for debugging purposes */
5844
5845         /* This is called only if amdgpu_pci_error_detected() returns
5846          * PCI_ERS_RESULT_CAN_RECOVER. Reads/writes to the device still
5847          * work, so there is no need to reset the slot.
5848          */
5849
5850         return PCI_ERS_RESULT_RECOVERED;
5851 }
5852
5853 /**
5854  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5855  * @pdev: PCI device struct
5856  *
5857  * Description: This routine is called by the pci error recovery
5858  * code after the PCI slot has been reset, just before we
5859  * should resume normal operations.
5860  */
5861 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5862 {
5863         struct drm_device *dev = pci_get_drvdata(pdev);
5864         struct amdgpu_device *adev = drm_to_adev(dev);
5865         int r, i;
5866         struct amdgpu_reset_context reset_context;
5867         u32 memsize;
5868         struct list_head device_list;
5869
5870         DRM_INFO("PCI error: slot reset callback!!\n");
5871
5872         memset(&reset_context, 0, sizeof(reset_context));
5873
5874         INIT_LIST_HEAD(&device_list);
5875         list_add_tail(&adev->reset_list, &device_list);
5876
5877         /* wait for asic to come out of reset */
5878         msleep(500);
5879
5880         /* Restore PCI config space */
5881         amdgpu_device_load_pci_state(pdev);
5882
5883         /* confirm ASIC came out of reset */
5884         for (i = 0; i < adev->usec_timeout; i++) {
5885                 memsize = amdgpu_asic_get_config_memsize(adev);
5886
5887                 if (memsize != 0xffffffff)
5888                         break;
5889                 udelay(1);
5890         }
5891         if (memsize == 0xffffffff) {
5892                 r = -ETIME;
5893                 goto out;
5894         }
5895
5896         reset_context.method = AMD_RESET_METHOD_NONE;
5897         reset_context.reset_req_dev = adev;
5898         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5899         set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5900
5901         adev->no_hw_access = true;
5902         r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5903         adev->no_hw_access = false;
5904         if (r)
5905                 goto out;
5906
5907         r = amdgpu_do_asic_reset(&device_list, &reset_context);
5908
5909 out:
5910         if (!r) {
5911                 if (amdgpu_device_cache_pci_state(adev->pdev))
5912                         pci_restore_state(adev->pdev);
5913
5914                 DRM_INFO("PCIe error recovery succeeded\n");
5915         } else {
5916                 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5917                 amdgpu_device_unset_mp1_state(adev);
5918                 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5919         }
5920
5921         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5922 }
5923
5924 /**
5925  * amdgpu_pci_resume() - resume normal ops after PCI reset
5926  * @pdev: pointer to PCI device
5927  *
5928  * Called when the error recovery driver tells us that it's
5929  * OK to resume normal operation.
5930  */
5931 void amdgpu_pci_resume(struct pci_dev *pdev)
5932 {
5933         struct drm_device *dev = pci_get_drvdata(pdev);
5934         struct amdgpu_device *adev = drm_to_adev(dev);
5935         int i;
5936
5937
5938         DRM_INFO("PCI error: resume callback!!\n");
5939
5940         /* Only continue execution for the case of pci_channel_io_frozen */
5941         if (adev->pci_channel_state != pci_channel_io_frozen)
5942                 return;
5943
5944         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5945                 struct amdgpu_ring *ring = adev->rings[i];
5946
5947                 if (!ring || !ring->sched.thread)
5948                         continue;
5949
5950                 drm_sched_start(&ring->sched, true);
5951         }
5952
5953         amdgpu_device_unset_mp1_state(adev);
5954         amdgpu_device_unlock_reset_domain(adev->reset_domain);
5955 }
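
/*
 * Editor's note: illustrative sketch only, not part of this file. The
 * four callbacks above are meant to be plugged into a struct
 * pci_error_handlers table (in amdgpu this is done from amdgpu_drv.c);
 * a minimal sketch of such a table:
 */
static const struct pci_error_handlers amdgpu_example_pci_err_handler = {
        .error_detected = amdgpu_pci_error_detected,
        .mmio_enabled   = amdgpu_pci_mmio_enabled,
        .slot_reset     = amdgpu_pci_slot_reset,
        .resume         = amdgpu_pci_resume,
};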
5956
5957 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5958 {
5959         struct drm_device *dev = pci_get_drvdata(pdev);
5960         struct amdgpu_device *adev = drm_to_adev(dev);
5961         int r;
5962
5963         r = pci_save_state(pdev);
5964         if (!r) {
5965                 kfree(adev->pci_state);
5966
5967                 adev->pci_state = pci_store_saved_state(pdev);
5968
5969                 if (!adev->pci_state) {
5970                         DRM_ERROR("Failed to store PCI saved state");
5971                         return false;
5972                 }
5973         } else {
5974                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5975                 return false;
5976         }
5977
5978         return true;
5979 }
5980
5981 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5982 {
5983         struct drm_device *dev = pci_get_drvdata(pdev);
5984         struct amdgpu_device *adev = drm_to_adev(dev);
5985         int r;
5986
5987         if (!adev->pci_state)
5988                 return false;
5989
5990         r = pci_load_saved_state(pdev, adev->pci_state);
5991
5992         if (!r) {
5993                 pci_restore_state(pdev);
5994         } else {
5995                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5996                 return false;
5997         }
5998
5999         return true;
6000 }
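
/*
 * Editor's note: illustrative sketch only, not part of this file. The
 * cache/load pair above is intended to bracket operations that clobber
 * PCI config space; a hypothetical caller could look like this:
 */
static void amdgpu_example_reset_with_saved_state(struct amdgpu_device *adev)
{
        /* Snapshot config space before the disruptive operation... */
        if (!amdgpu_device_cache_pci_state(adev->pdev))
                return;

        /* ... an ASIC reset or similar would go here ... */

        /* ... and restore it once the device is reachable again. */
        amdgpu_device_load_pci_state(adev->pdev);
}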
6001
6002 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
6003                 struct amdgpu_ring *ring)
6004 {
6005 #ifdef CONFIG_X86_64
6006         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6007                 return;
6008 #endif
6009         if (adev->gmc.xgmi.connected_to_cpu)
6010                 return;
6011
6012         if (ring && ring->funcs->emit_hdp_flush)
6013                 amdgpu_ring_emit_hdp_flush(ring);
6014         else
6015                 amdgpu_asic_flush_hdp(adev, ring);
6016 }
6017
6018 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
6019                 struct amdgpu_ring *ring)
6020 {
6021 #ifdef CONFIG_X86_64
6022         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6023                 return;
6024 #endif
6025         if (adev->gmc.xgmi.connected_to_cpu)
6026                 return;
6027
6028         amdgpu_asic_invalidate_hdp(adev, ring);
6029 }
6030
6031 int amdgpu_in_reset(struct amdgpu_device *adev)
6032 {
6033         return atomic_read(&adev->reset_domain->in_gpu_reset);
6034 }
6035
6036 /**
6037  * amdgpu_device_halt() - bring hardware to some kind of halt state
6038  *
6039  * @adev: amdgpu_device pointer
6040  *
6041  * Bring hardware to some kind of halt state so that no one can touch it
6042  * any more. This helps preserve the error context when an error occurs.
6043  * Compared to a simple hang, the system stays stable enough for at least
6044  * SSH access. It should then be trivial to inspect the hardware state and
6045  * see what's going on. Implemented as follows:
6046  *
6047  * 1. drm_dev_unplug() makes device inaccessible to user space(IOCTLs, etc),
6048  *    clears all CPU mappings to device, disallows remappings through page faults
6049  * 2. amdgpu_irq_disable_all() disables all interrupts
6050  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
6051  * 4. set adev->no_hw_access to avoid potential crashes after step 5
6052  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
6053  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
6054  *    flush any in flight DMA operations
6055  */
6056 void amdgpu_device_halt(struct amdgpu_device *adev)
6057 {
6058         struct pci_dev *pdev = adev->pdev;
6059         struct drm_device *ddev = adev_to_drm(adev);
6060
6061         amdgpu_xcp_dev_unplug(adev);
6062         drm_dev_unplug(ddev);
6063
6064         amdgpu_irq_disable_all(adev);
6065
6066         amdgpu_fence_driver_hw_fini(adev);
6067
6068         adev->no_hw_access = true;
6069
6070         amdgpu_device_unmap_mmio(adev);
6071
6072         pci_disable_device(pdev);
6073         pci_wait_for_pending_transaction(pdev);
6074 }
6075
6076 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
6077                                 u32 reg)
6078 {
6079         unsigned long flags, address, data;
6080         u32 r;
6081
6082         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6083         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6084
6085         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6086         WREG32(address, reg * 4);
6087         (void)RREG32(address);
6088         r = RREG32(data);
6089         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6090         return r;
6091 }
6092
6093 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
6094                                 u32 reg, u32 v)
6095 {
6096         unsigned long flags, address, data;
6097
6098         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6099         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6100
6101         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6102         WREG32(address, reg * 4);
6103         (void)RREG32(address);
6104         WREG32(data, v);
6105         (void)RREG32(data);
6106         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6107 }
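
/*
 * Editor's note: illustrative sketch only, not part of this file. A
 * hypothetical read-modify-write of a PCIe port register built on the
 * two accessors above; the helper name is an assumption made for
 * illustration.
 */
static void amdgpu_example_pcie_port_set_bits(struct amdgpu_device *adev,
                                              u32 reg, u32 bits)
{
        u32 v = amdgpu_device_pcie_port_rreg(adev, reg);

        v |= bits;
        amdgpu_device_pcie_port_wreg(adev, reg, v);
}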
6108
6109 /**
6110  * amdgpu_device_switch_gang - switch to a new gang
6111  * @adev: amdgpu_device pointer
6112  * @gang: the gang to switch to
6113  *
6114  * Try to switch to a new gang.
6115  * Returns: NULL if we switched to the new gang or a reference to the current
6116  * gang leader.
6117  */
6118 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6119                                             struct dma_fence *gang)
6120 {
6121         struct dma_fence *old = NULL;
6122
6123         do {
6124                 dma_fence_put(old);
6125                 rcu_read_lock();
6126                 old = dma_fence_get_rcu_safe(&adev->gang_submit);
6127                 rcu_read_unlock();
6128
6129                 if (old == gang)
6130                         break;
6131
6132                 if (!dma_fence_is_signaled(old))
6133                         return old;
6134
6135         } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6136                          old, gang) != old);
6137
6138         dma_fence_put(old);
6139         return NULL;
6140 }
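
/*
 * Editor's note: illustrative sketch only, not part of this file. A
 * hypothetical caller that keeps trying to install @gang as the new
 * gang leader and waits for the currently running gang whenever the
 * switch is refused; the helper name is an assumption.
 */
static int amdgpu_example_install_gang(struct amdgpu_device *adev,
                                       struct dma_fence *gang)
{
        struct dma_fence *old;

        while ((old = amdgpu_device_switch_gang(adev, gang))) {
                /* Previous gang still running: wait for it, then retry. */
                long r = dma_fence_wait(old, true);

                dma_fence_put(old);
                if (r < 0)
                        return r;
        }
        return 0;
}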
6141
6142 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
6143 {
6144         switch (adev->asic_type) {
6145 #ifdef CONFIG_DRM_AMDGPU_SI
6146         case CHIP_HAINAN:
6147 #endif
6148         case CHIP_TOPAZ:
6149                 /* chips with no display hardware */
6150                 return false;
6151 #ifdef CONFIG_DRM_AMDGPU_SI
6152         case CHIP_TAHITI:
6153         case CHIP_PITCAIRN:
6154         case CHIP_VERDE:
6155         case CHIP_OLAND:
6156 #endif
6157 #ifdef CONFIG_DRM_AMDGPU_CIK
6158         case CHIP_BONAIRE:
6159         case CHIP_HAWAII:
6160         case CHIP_KAVERI:
6161         case CHIP_KABINI:
6162         case CHIP_MULLINS:
6163 #endif
6164         case CHIP_TONGA:
6165         case CHIP_FIJI:
6166         case CHIP_POLARIS10:
6167         case CHIP_POLARIS11:
6168         case CHIP_POLARIS12:
6169         case CHIP_VEGAM:
6170         case CHIP_CARRIZO:
6171         case CHIP_STONEY:
6172                 /* chips with display hardware */
6173                 return true;
6174         default:
6175                 /* IP discovery */
6176                 if (!adev->ip_versions[DCE_HWIP][0] ||
6177                     (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6178                         return false;
6179                 return true;
6180         }
6181 }
6182
6183 uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
6184                 uint32_t inst, uint32_t reg_addr, char reg_name[],
6185                 uint32_t expected_value, uint32_t mask)
6186 {
6187         uint32_t ret = 0;
6188         uint32_t old_ = 0;
6189         uint32_t tmp_ = RREG32(reg_addr);
6190         uint32_t loop = adev->usec_timeout;
6191
6192         while ((tmp_ & (mask)) != (expected_value)) {
6193                 if (old_ != tmp_) {
6194                         loop = adev->usec_timeout;
6195                         old_ = tmp_;
6196                 } else
6197                         udelay(1);
6198                 tmp_ = RREG32(reg_addr);
6199                 loop--;
6200                 if (!loop) {
6201                         DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n",
6202                                   inst, reg_name, (uint32_t)expected_value,
6203                                   (uint32_t)(tmp_ & (mask)));
6204                         ret = -ETIMEDOUT;
6205                         break;
6206                 }
6207         }
6208         return ret;
6209 }
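
/*
 * Editor's note: illustrative sketch only, not part of this file. A
 * hypothetical poll for a "ready" bit built on the helper above; the
 * instance, register offset, name and bit mask are placeholders.
 */
static int amdgpu_example_wait_ready(struct amdgpu_device *adev)
{
        /* Wait until bit 0 of a (hypothetical) status register is set. */
        return amdgpu_device_wait_on_rreg(adev, 0 /* inst */,
                                          0x1234 /* reg offset */,
                                          "EXAMPLE_STATUS",
                                          0x1 /* expected */, 0x1 /* mask */);
}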