 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 * Authors: Dave Airlie
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
#include <linux/pci-p2pdma.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "bif/bif_4_1_d.h"
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"
#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>
#include <drm/drm_drv.h>
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
#define AMDGPU_RESUME_MS	2000
#define AMDGPU_MAX_RETRY_LIMIT	2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
const char *amdgpu_asic_name[] = {
 * DOC: pcie_replay_count
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
                struct device_attribute *attr, char *buf)
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
        return sysfs_emit(buf, "%llu\n", cnt);
static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
                   amdgpu_device_get_pcie_replay_count, NULL);
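/*
 * Example (illustrative, not part of the driver): once this attribute is
 * registered, the counter can be read from user space with e.g.
 *
 *   cat /sys/bus/pci/devices/<bdf>/pcie_replay_count
 *
 * where <bdf> is the GPU's PCI address.
 */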
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
 * The amdgpu driver provides a sysfs API for reporting the product name
 * The file product_name is used for this and returns the product name
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
static ssize_t amdgpu_device_get_product_name(struct device *dev,
                struct device_attribute *attr, char *buf)
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        return sysfs_emit(buf, "%s\n", adev->product_name);
static DEVICE_ATTR(product_name, S_IRUGO,
                   amdgpu_device_get_product_name, NULL);
 * DOC: product_number
 * The amdgpu driver provides a sysfs API for reporting the part number
 * The file product_number is used for this and returns the part number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
static ssize_t amdgpu_device_get_product_number(struct device *dev,
                struct device_attribute *attr, char *buf)
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        return sysfs_emit(buf, "%s\n", adev->product_number);
static DEVICE_ATTR(product_number, S_IRUGO,
                   amdgpu_device_get_product_number, NULL);
 * The amdgpu driver provides a sysfs API for reporting the serial number
 * The file serial_number is used for this and returns the serial number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
static ssize_t amdgpu_device_get_serial_number(struct device *dev,
                struct device_attribute *attr, char *buf)
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        return sysfs_emit(buf, "%s\n", adev->serial);
static DEVICE_ATTR(serial_number, S_IRUGO,
                   amdgpu_device_get_serial_number, NULL);
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 * @dev: drm_device pointer
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise returns false.
bool amdgpu_device_supports_px(struct drm_device *dev)
        struct amdgpu_device *adev = drm_to_adev(dev);
        if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 * @dev: drm_device pointer
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise returns false.
bool amdgpu_device_supports_boco(struct drm_device *dev)
        struct amdgpu_device *adev = drm_to_adev(dev);
            ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
 * amdgpu_device_supports_baco - Does the device support BACO
 * @dev: drm_device pointer
 * Returns true if the device supports BACO,
 * otherwise returns false.
bool amdgpu_device_supports_baco(struct drm_device *dev)
        struct amdgpu_device *adev = drm_to_adev(dev);
        return amdgpu_asic_supports_baco(adev);
 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
 * Smart Shift support
 * @dev: drm_device pointer
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
        return (amdgpu_device_supports_boco(dev) &&
                amdgpu_acpi_is_power_shift_control_supported());
 * VRAM access helper functions
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
                             void *buf, size_t size, bool write)
        uint32_t hi = ~0, tmp = 0;
        uint32_t *data = buf;
        if (!drm_dev_enter(adev_to_drm(adev), &idx))
        BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
        spin_lock_irqsave(&adev->mmio_idx_lock, flags);
        for (last = pos + size; pos < last; pos += 4) {
                WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
                WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
                WREG32_NO_KIQ(mmMM_DATA, *data++);
                *data++ = RREG32_NO_KIQ(mmMM_DATA);
        spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 * amdgpu_device_aper_access - access vram via the vram aperture
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 * Returns the number of bytes that have been transferred.
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
                                 void *buf, size_t size, bool write)
        if (!adev->mman.aper_base_kaddr)
        last = min(pos + size, adev->gmc.visible_vram_size);
        addr = adev->mman.aper_base_kaddr + pos;
                memcpy_toio(addr, buf, count);
                amdgpu_device_flush_hdp(adev, NULL);
                amdgpu_device_invalidate_hdp(adev, NULL);
                memcpy_fromio(buf, addr, count);
 * amdgpu_device_vram_access - read/write a buffer in vram
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
                               void *buf, size_t size, bool write)
        /* try using the vram aperture to access vram first */
        count = amdgpu_device_aper_access(adev, pos, buf, size, write);
        /* use MM_INDEX/MM_DATA to access the rest of vram */
                amdgpu_device_mm_access(adev, pos, buf, size, write);
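/*
 * Example (illustrative sketch, not part of the driver): copying a small
 * buffer out of and back into VRAM with the helper above. The offset and
 * buffer here are hypothetical; callers must pass dword-aligned positions
 * and sizes, as enforced by the MM_INDEX/MM_DATA fallback.
 */
#if 0
static void example_dump_vram(struct amdgpu_device *adev)
{
        uint32_t data[4] = {};

        /* read 16 bytes starting at VRAM offset 0x1000 */
        amdgpu_device_vram_access(adev, 0x1000, data, sizeof(data), false);

        /* write the (possibly modified) buffer back to the same offset */
        amdgpu_device_vram_access(adev, 0x1000, data, sizeof(data), true);
}
#endif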
 * register access helper functions.
/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
        if (adev->no_hw_access)
#ifdef CONFIG_LOCKDEP
         * This is a bit complicated to understand, so worth a comment. What we assert
         * here is that the GPU reset is not running on another thread in parallel.
         * For this we trylock the read side of the reset semaphore, if that succeeds
         * we know that the reset is not running in parallel.
         * If the trylock fails we assert that we are either already holding the read
         * side of the lock or are the reset thread itself and hold the write side of
        if (down_read_trylock(&adev->reset_domain->sem))
                up_read(&adev->reset_domain->sem);
                lockdep_assert_held(&adev->reset_domain->sem);
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 * Returns the 32 bit value from the offset specified.
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
                            uint32_t reg, uint32_t acc_flags)
        if (amdgpu_device_skip_hw_access(adev))
        if ((reg * 4) < adev->rmmio_size) {
                if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
                    amdgpu_sriov_runtime(adev) &&
                    down_read_trylock(&adev->reset_domain->sem)) {
                        ret = amdgpu_kiq_rreg(adev, reg);
                        up_read(&adev->reset_domain->sem);
                        ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
                ret = adev->pcie_rreg(adev, reg * 4);
        trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
 * MMIO register read with bytes helper functions
 * @offset: byte offset from MMIO start
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * Returns the 8 bit value from the offset specified.
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
        if (amdgpu_device_skip_hw_access(adev))
        if (offset < adev->rmmio_size)
                return (readb(adev->rmmio + offset));
 * MMIO register write with bytes helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 * Writes the value specified to the offset specified.
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
        if (amdgpu_device_skip_hw_access(adev))
        if (offset < adev->rmmio_size)
                writeb(value, adev->rmmio + offset);
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 * Writes the value specified to the offset specified.
void amdgpu_device_wreg(struct amdgpu_device *adev,
                        uint32_t reg, uint32_t v,
        if (amdgpu_device_skip_hw_access(adev))
        if ((reg * 4) < adev->rmmio_size) {
                if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
                    amdgpu_sriov_runtime(adev) &&
                    down_read_trylock(&adev->reset_domain->sem)) {
                        amdgpu_kiq_wreg(adev, reg, v);
                        up_read(&adev->reset_domain->sem);
                        writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
                adev->pcie_wreg(adev, reg * 4, v);
        trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
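/*
 * Example (illustrative sketch): a read-modify-write of a register using
 * the two helpers above. The register offset and bit mask are hypothetical;
 * driver code normally goes through the RREG32()/WREG32() macros, which
 * expand to these functions.
 */
#if 0
static void example_set_reg_bit(struct amdgpu_device *adev)
{
        uint32_t v;

        v = amdgpu_device_rreg(adev, 0x10 /* dword offset */, 0);
        v |= 0x1;
        amdgpu_device_wreg(adev, 0x10, v, 0);
}
#endif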
 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
 * @adev: amdgpu_device pointer
 * @reg: mmio/rlc register
 * This function is invoked only for debugfs register access.
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
                             uint32_t reg, uint32_t v)
        if (amdgpu_device_skip_hw_access(adev))
        if (amdgpu_sriov_fullaccess(adev) &&
            adev->gfx.rlc.funcs &&
            adev->gfx.rlc.funcs->is_rlcg_access_range) {
                if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
                        return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
        } else if ((reg * 4) >= adev->rmmio_size) {
                adev->pcie_wreg(adev, reg * 4, v);
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 * amdgpu_mm_rdoorbell - read a doorbell dword
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
        if (amdgpu_device_skip_hw_access(adev))
        if (index < adev->doorbell.num_doorbells) {
                return readl(adev->doorbell.ptr + index);
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
 * amdgpu_mm_wdoorbell - write a doorbell dword
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
        if (amdgpu_device_skip_hw_access(adev))
        if (index < adev->doorbell.num_doorbells) {
                writel(v, adev->doorbell.ptr + index);
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
 * amdgpu_mm_rdoorbell64 - read a doorbell qword
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
        if (amdgpu_device_skip_hw_access(adev))
        if (index < adev->doorbell.num_doorbells) {
                return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
 * amdgpu_mm_wdoorbell64 - write a doorbell qword
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
        if (amdgpu_device_skip_hw_access(adev))
        if (index < adev->doorbell.num_doorbells) {
                atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
 * amdgpu_device_indirect_rreg - read an indirect register
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 * Returns the value of indirect register @reg_addr
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
                                u32 pcie_index, u32 pcie_data,
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        r = readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 * Returns the value of indirect register @reg_addr
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
                                  u32 pcie_index, u32 pcie_data,
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
        /* read low 32 bits */
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        r = readl(pcie_data_offset);
        /* read high 32 bits */
        writel(reg_addr + 4, pcie_index_offset);
        readl(pcie_index_offset);
        r |= ((u64)readl(pcie_data_offset) << 32);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
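/*
 * Example (illustrative sketch): ASIC code typically wires these helpers up
 * as its pcie_rreg/pcie_wreg callbacks, feeding in the chip-specific
 * index/data register pair. The register offsets below are hypothetical.
 */
#if 0
#define EXAMPLE_PCIE_INDEX2	0x34	/* hypothetical index register */
#define EXAMPLE_PCIE_DATA2	0x38	/* hypothetical data register */

static u32 example_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
        return amdgpu_device_indirect_rreg(adev, EXAMPLE_PCIE_INDEX2,
                                           EXAMPLE_PCIE_DATA2, reg);
}
#endif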
 * amdgpu_device_indirect_wreg - write an indirect register
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
                                 u32 pcie_index, u32 pcie_data,
                                 u32 reg_addr, u32 reg_data)
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        writel(reg_data, pcie_data_offset);
        readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
                                   u32 pcie_index, u32 pcie_data,
                                   u32 reg_addr, u64 reg_data)
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
        /* write low 32 bits */
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
        readl(pcie_data_offset);
        /* write high 32 bits */
        writel(reg_addr + 4, pcie_index_offset);
        readl(pcie_index_offset);
        writel((u32)(reg_data >> 32), pcie_data_offset);
        readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 * amdgpu_invalid_rreg - dummy reg read function
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
        DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
 * amdgpu_invalid_wreg - dummy reg write function
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
        DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * Dummy 64 bit register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
        DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
 * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 * Dummy 64 bit register write function. Used for register blocks
 * that certain asics don't have (all asics).
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
        DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
 * amdgpu_block_invalid_rreg - dummy reg read function
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
                                          uint32_t block, uint32_t reg)
        DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
 * amdgpu_block_invalid_wreg - dummy reg write function
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
                                      uint32_t reg, uint32_t v)
        DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 * @adev: amdgpu_device pointer
 * Does any asic specific work and then calls atom asic init.
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
        amdgpu_asic_pre_asic_init(adev);
        if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
                return amdgpu_atomfirmware_asic_init(adev, true);
        return amdgpu_atom_asic_init(adev->mode_info.atom_context);
 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
 * @adev: amdgpu_device pointer
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
        return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
                                       AMDGPU_GEM_DOMAIN_VRAM |
                                       AMDGPU_GEM_DOMAIN_GTT,
                                       &adev->mem_scratch.robj,
                                       &adev->mem_scratch.gpu_addr,
                                       (void **)&adev->mem_scratch.ptr);
 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
 * @adev: amdgpu_device pointer
 * Frees the VRAM scratch page.
static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
        amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
 * amdgpu_device_program_register_sequence - program an array of registers.
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
                                             const u32 *registers,
                                             const u32 array_size)
        u32 tmp, reg, and_mask, or_mask;
        for (i = 0; i < array_size; i += 3) {
                reg = registers[i + 0];
                and_mask = registers[i + 1];
                or_mask = registers[i + 2];
                if (and_mask == 0xffffffff) {
                        if (adev->family >= AMDGPU_FAMILY_AI)
                                tmp |= (or_mask & and_mask);
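/*
 * Example (illustrative sketch): a golden register list is laid out as
 * {register, AND mask, OR mask} triples. The offsets and masks below are
 * hypothetical.
 */
#if 0
static const u32 example_golden_settings[] = {
        /* reg,   and_mask,   or_mask */
        0x1234, 0xffffffff, 0x00000001,	/* full overwrite */
        0x5678, 0x0000ff00, 0x00001200,	/* read-modify-write of bits 8..15 */
};

static void example_program_golden_regs(struct amdgpu_device *adev)
{
        amdgpu_device_program_register_sequence(adev, example_golden_settings,
                                                ARRAY_SIZE(example_golden_settings));
}
#endif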
 * amdgpu_device_pci_config_reset - reset the GPU
 * @adev: amdgpu_device pointer
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
        pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 * @adev: amdgpu_device pointer
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
        return pci_reset_function(adev->pdev);
 * GPU doorbell aperture helper functions.
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 * @adev: amdgpu_device pointer
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
        /* No doorbell on SI hardware generation */
        if (adev->asic_type < CHIP_BONAIRE) {
                adev->doorbell.base = 0;
                adev->doorbell.size = 0;
                adev->doorbell.num_doorbells = 0;
                adev->doorbell.ptr = NULL;
        if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
        amdgpu_asic_init_doorbell_index(adev);
        /* doorbell bar mapping */
        adev->doorbell.base = pci_resource_start(adev->pdev, 2);
        adev->doorbell.size = pci_resource_len(adev->pdev, 2);
        if (adev->enable_mes) {
                adev->doorbell.num_doorbells =
                        adev->doorbell.size / sizeof(u32);
                adev->doorbell.num_doorbells =
                        min_t(u32, adev->doorbell.size / sizeof(u32),
                              adev->doorbell_index.max_assignment + 1);
                if (adev->doorbell.num_doorbells == 0)
                /* For Vega, reserve and map two pages on doorbell BAR since SDMA
                 * paging queue doorbell uses the second page. The
                 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
                 * doorbells are in the first page. So with paging queue enabled,
                 * the max num_doorbells should be incremented by one page (0x400 in dwords).
                 */
                if (adev->asic_type >= CHIP_VEGA10)
                        adev->doorbell.num_doorbells += 0x400;
        adev->doorbell.ptr = ioremap(adev->doorbell.base,
                                     adev->doorbell.num_doorbells *
        if (adev->doorbell.ptr == NULL)
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 * @adev: amdgpu_device pointer
 * Tear down doorbell driver information (CIK)
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
        iounmap(adev->doorbell.ptr);
        adev->doorbell.ptr = NULL;
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 * @adev: amdgpu_device pointer
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
        if (adev->wb.wb_obj) {
                amdgpu_bo_free_kernel(&adev->wb.wb_obj,
                                      (void **)&adev->wb.wb);
                adev->wb.wb_obj = NULL;
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 * @adev: amdgpu_device pointer
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
        if (adev->wb.wb_obj == NULL) {
                /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
                r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
                                            PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                            &adev->wb.wb_obj, &adev->wb.gpu_addr,
                                            (void **)&adev->wb.wb);
                        dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
                adev->wb.num_wb = AMDGPU_MAX_WB;
                memset(&adev->wb.used, 0, sizeof(adev->wb.used));
                /* clear wb memory */
                memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
 * amdgpu_device_wb_get - Allocate a wb entry
 * @adev: amdgpu_device pointer
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
        unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
        if (offset < adev->wb.num_wb) {
                __set_bit(offset, adev->wb.used);
                *wb = offset << 3; /* convert to dw offset */
 * amdgpu_device_wb_free - Free a wb entry
 * @adev: amdgpu_device pointer
 * Free a wb slot allocated for use by the driver (all asics)
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
        if (wb < adev->wb.num_wb)
                __clear_bit(wb, adev->wb.used);
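/*
 * Example (illustrative sketch): the usual writeback lifecycle. A caller
 * allocates a slot, points the GPU at adev->wb.gpu_addr plus the returned
 * dword offset, reads the CPU-visible copy, and frees the slot again.
 * Error handling is trimmed for brevity.
 */
#if 0
static int example_use_wb(struct amdgpu_device *adev)
{
        u32 wb;
        int r;

        r = amdgpu_device_wb_get(adev, &wb);
        if (r)
                return r;

        /* GPU address of the slot:  adev->wb.gpu_addr + (wb * 4) */
        /* CPU-visible value:        adev->wb.wb[wb]              */

        amdgpu_device_wb_free(adev, wb);
        return 0;
}
#endif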
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 * @adev: amdgpu_device pointer
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
        int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
        struct pci_bus *root;
        struct resource *res;
        if (amdgpu_sriov_vf(adev))
        /* skip if the bios has already enabled large BAR */
        if (adev->gmc.real_vram_size &&
            (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
        /* Check if the root BUS has 64bit memory resources */
        root = adev->pdev->bus;
        while (root->parent)
                root = root->parent;
        pci_bus_for_each_resource(root, res, i) {
                if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
                    res->start > 0x100000000ull)
        /* Trying to resize is pointless without a root hub window above 4GB */
        /* Limit the BAR size to what is available */
        rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
        /* Disable memory decoding while we change the BAR addresses and size */
        pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
        pci_write_config_word(adev->pdev, PCI_COMMAND,
                              cmd & ~PCI_COMMAND_MEMORY);
        /* Free the VRAM and doorbell BAR, we most likely need to move both. */
        amdgpu_device_doorbell_fini(adev);
        if (adev->asic_type >= CHIP_BONAIRE)
                pci_release_resource(adev->pdev, 2);
        pci_release_resource(adev->pdev, 0);
        r = pci_resize_resource(adev->pdev, 0, rbar_size);
                DRM_INFO("Not enough PCI address space for a large BAR.");
        else if (r && r != -ENOTSUPP)
                DRM_ERROR("Problem resizing BAR0 (%d).", r);
        pci_assign_unassigned_bus_resources(adev->pdev->bus);
        /* When the doorbell or fb BAR isn't available we have no chance of
         * using the device.
         */
        r = amdgpu_device_doorbell_init(adev);
        if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
        pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
 * GPU helper functions.
 * amdgpu_device_need_post - check if the hw needs post or not
 * @adev: amdgpu_device pointer
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if post is needed or false if not.
bool amdgpu_device_need_post(struct amdgpu_device *adev)
        if (amdgpu_sriov_vf(adev))
        if (amdgpu_passthrough(adev)) {
                /* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
                 * some old smc fw still needs the driver to do a vPost, otherwise the gpu hangs.
                 * Those smc fw versions above 22.15 don't have this flaw, so we force
                 * vPost to be executed for smc fw versions below 22.15.
                 */
                if (adev->asic_type == CHIP_FIJI) {
                        err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
                        /* force vPost if an error occurred */
                        fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
                        if (fw_ver < 0x00160e00)
        /* Don't post if we need to reset whole hive on init */
        if (adev->gmc.xgmi.pending_reset)
        if (adev->has_hw_reset) {
                adev->has_hw_reset = false;
        /* bios scratch used on CIK+ */
        if (adev->asic_type >= CHIP_BONAIRE)
                return amdgpu_atombios_scratch_need_asic_init(adev);
        /* check MEM_SIZE for older asics */
        reg = amdgpu_asic_get_config_memsize(adev);
        if ((reg != 0) && (reg != 0xffffffff))
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 * @adev: amdgpu_device pointer
 * Confirm whether the module parameter and pcie bridge agree that ASPM should
 * be set for this device.
 * Returns true if it should be used or false if not.
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
        switch (amdgpu_aspm) {
        return pcie_aspm_enabled(adev->pdev);
/* if we get transitioned to only one device, take VGA back */
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
        struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
        amdgpu_asic_set_vga_state(adev, state);
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 * amdgpu_device_check_block_size - validate the vm block size
 * @adev: amdgpu_device pointer
 * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in the page table versus the
 * page directory; a page is 4KB, so we have 12 bits of offset, a minimum of
 * 9 bits in the page table, and the remaining bits in the page directory.
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
        /* defines the number of bits in page table versus page directory,
         * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
         * page table and the remaining bits are in the page directory */
        if (amdgpu_vm_block_size == -1)
        if (amdgpu_vm_block_size < 9) {
                dev_warn(adev->dev, "VM page table size (%d) too small\n",
                         amdgpu_vm_block_size);
                amdgpu_vm_block_size = -1;
 * amdgpu_device_check_vm_size - validate the vm size
 * @adev: amdgpu_device pointer
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
        /* no need to check the default value */
        if (amdgpu_vm_size == -1)
        if (amdgpu_vm_size < 1) {
                dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
                amdgpu_vm_size = -1;
static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
        bool is_os_64 = (sizeof(void *) == 8);
        uint64_t total_memory;
        uint64_t dram_size_seven_GB = 0x1B8000000;
        uint64_t dram_size_three_GB = 0xB8000000;
        if (amdgpu_smu_memory_pool_size == 0)
                DRM_WARN("Not 64-bit OS, feature not supported\n");
        total_memory = (uint64_t)si.totalram * si.mem_unit;
        if ((amdgpu_smu_memory_pool_size == 1) ||
            (amdgpu_smu_memory_pool_size == 2)) {
                if (total_memory < dram_size_three_GB)
        } else if ((amdgpu_smu_memory_pool_size == 4) ||
                   (amdgpu_smu_memory_pool_size == 8)) {
                if (total_memory < dram_size_seven_GB)
                DRM_WARN("Smu memory pool size not supported\n");
        adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
                DRM_WARN("Not enough system memory\n");
        adev->pm.smu_prv_buffer_size = 0;
static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
        if (!(adev->flags & AMD_IS_APU) ||
            adev->asic_type < CHIP_RAVEN)
        switch (adev->asic_type) {
                if (adev->pdev->device == 0x15dd)
                        adev->apu_flags |= AMD_APU_IS_RAVEN;
                if (adev->pdev->device == 0x15d8)
                        adev->apu_flags |= AMD_APU_IS_PICASSO;
                if ((adev->pdev->device == 0x1636) ||
                    (adev->pdev->device == 0x164c))
                        adev->apu_flags |= AMD_APU_IS_RENOIR;
                        adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
                adev->apu_flags |= AMD_APU_IS_VANGOGH;
        case CHIP_YELLOW_CARP:
        case CHIP_CYAN_SKILLFISH:
                if ((adev->pdev->device == 0x13FE) ||
                    (adev->pdev->device == 0x143F))
                        adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
 * amdgpu_device_check_arguments - validate module params
 * @adev: amdgpu_device pointer
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
        if (amdgpu_sched_jobs < 4) {
                dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
                amdgpu_sched_jobs = 4;
        } else if (!is_power_of_2(amdgpu_sched_jobs)) {
                dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
                amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
        if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
                /* gart size must be greater or equal to 32M */
                dev_warn(adev->dev, "gart size (%d) too small\n",
                amdgpu_gart_size = -1;
        if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
                /* gtt size must be greater or equal to 32M */
                dev_warn(adev->dev, "gtt size (%d) too small\n",
                amdgpu_gtt_size = -1;
        /* valid range is between 4 and 9 inclusive */
        if (amdgpu_vm_fragment_size != -1 &&
            (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
                dev_warn(adev->dev, "valid range is between 4 and 9\n");
                amdgpu_vm_fragment_size = -1;
        if (amdgpu_sched_hw_submission < 2) {
                dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
                         amdgpu_sched_hw_submission);
                amdgpu_sched_hw_submission = 2;
        } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
                dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
                         amdgpu_sched_hw_submission);
                amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
        if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
                dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
                amdgpu_reset_method = -1;
        amdgpu_device_check_smu_prv_buffer_size(adev);
        amdgpu_device_check_vm_size(adev);
        amdgpu_device_check_block_size(adev);
        adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
 * amdgpu_switcheroo_set_state - set switcheroo state
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 * Callback for the switcheroo driver. Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
                                        enum vga_switcheroo_state state)
        struct drm_device *dev = pci_get_drvdata(pdev);
        if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
        if (state == VGA_SWITCHEROO_ON) {
                pr_info("switched on\n");
                /* don't suspend or resume card normally */
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                pci_set_power_state(pdev, PCI_D0);
                amdgpu_device_load_pci_state(pdev);
                r = pci_enable_device(pdev);
                        DRM_WARN("pci_enable_device failed (%d)\n", r);
                amdgpu_device_resume(dev, true);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
                pr_info("switched off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                amdgpu_device_suspend(dev, true);
                amdgpu_device_cache_pci_state(pdev);
                /* Shut down the device */
                pci_disable_device(pdev);
                pci_set_power_state(pdev, PCI_D3cold);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 * @pdev: pci dev pointer
 * Callback for the switcheroo driver. Checks if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
        struct drm_device *dev = pci_get_drvdata(pdev);
         * FIXME: open_count is protected by drm_global_mutex but that would lead to
         * locking inversion with the driver load path. And the access here is
         * completely racy anyway. So don't bother with locking for now.
        return atomic_read(&dev->open_count) == 0;
static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
        .set_gpu_state = amdgpu_switcheroo_set_state,
        .can_switch = amdgpu_switcheroo_can_switch,
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
int amdgpu_device_ip_set_clockgating_state(void *dev,
                                           enum amd_ip_block_type block_type,
                                           enum amd_clockgating_state state)
        struct amdgpu_device *adev = dev;
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                if (adev->ip_blocks[i].version->type != block_type)
                if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
                r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
                        (void *)adev, state);
                        DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
                                  adev->ip_blocks[i].version->funcs->name, r);
 * amdgpu_device_ip_set_powergating_state - set the PG state
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
int amdgpu_device_ip_set_powergating_state(void *dev,
                                           enum amd_ip_block_type block_type,
                                           enum amd_powergating_state state)
        struct amdgpu_device *adev = dev;
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                if (adev->ip_blocks[i].version->type != block_type)
                if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
                r = adev->ip_blocks[i].version->funcs->set_powergating_state(
                        (void *)adev, state);
                        DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
                                  adev->ip_blocks[i].version->funcs->name, r);
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
                        adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
 * amdgpu_device_ip_wait_for_idle - wait for idle
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
                                   enum amd_ip_block_type block_type)
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                if (adev->ip_blocks[i].version->type == block_type) {
                        r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
                              enum amd_ip_block_type block_type)
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                if (adev->ip_blocks[i].version->type == block_type)
                        return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
                              enum amd_ip_block_type type)
        for (i = 0; i < adev->num_ip_blocks; i++)
                if (adev->ip_blocks[i].version->type == type)
                        return &adev->ip_blocks[i];
 * amdgpu_device_ip_block_version_cmp
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 * Returns 0 if the IP block's version is equal to or greater than the given
 * (major, minor) version, 1 if it is smaller or the ip_block doesn't exist.
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
                                       enum amd_ip_block_type type,
                                       u32 major, u32 minor)
        struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
        if (ip_block && ((ip_block->version->major > major) ||
                         ((ip_block->version->major == major) &&
                          (ip_block->version->minor >= minor))))
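/*
 * Example (illustrative sketch): gating a code path on a minimum IP block
 * version. The version numbers here are hypothetical.
 */
#if 0
static bool example_has_new_gfx(struct amdgpu_device *adev)
{
        /* true when the GFX block is at least version 8.1 */
        return amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
                                                  8, 1) == 0;
}
#endif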
 * amdgpu_device_ip_block_add
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
                               const struct amdgpu_ip_block_version *ip_block_version)
        if (!ip_block_version)
        switch (ip_block_version->type) {
        case AMD_IP_BLOCK_TYPE_VCN:
                if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
        case AMD_IP_BLOCK_TYPE_JPEG:
                if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
        DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
                 ip_block_version->funcs->name);
        adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 * @adev: amdgpu_device pointer
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
        adev->enable_virtual_display = false;
        if (amdgpu_virtual_display) {
                const char *pci_address_name = pci_name(adev->pdev);
                char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
                pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
                pciaddstr_tmp = pciaddstr;
                while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
                        pciaddname = strsep(&pciaddname_tmp, ",");
                        if (!strcmp("all", pciaddname)
                            || !strcmp(pci_address_name, pciaddname)) {
                                adev->enable_virtual_display = true;
                                res = kstrtol(pciaddname_tmp, 10,
                        adev->mode_info.num_crtc = num_crtc;
                        adev->mode_info.num_crtc = 1;
                DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
                         amdgpu_virtual_display, pci_address_name,
                         adev->enable_virtual_display, adev->mode_info.num_crtc);
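/*
 * Example (illustrative): the virtual_display module parameter takes a
 * semicolon-separated list of "pci-address,crtc-count" entries, e.g.
 *
 *   amdgpu.virtual_display=0000:04:00.0,2;0000:05:00.0,1
 *
 * and "all" may be used in place of a PCI address to match every device.
 * The addresses above are hypothetical.
 */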
void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
        if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
                adev->mode_info.num_crtc = 1;
                adev->enable_virtual_display = true;
                DRM_INFO("virtual_display:%d, num_crtc:%d\n",
                         adev->enable_virtual_display, adev->mode_info.num_crtc);
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 * @adev: amdgpu_device pointer
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
        const char *chip_name;
        const struct gpu_info_firmware_header_v1_0 *hdr;
        adev->firmware.gpu_info_fw = NULL;
        if (adev->mman.discovery_bin) {
                 * FIXME: The bounding box is still needed by Navi12, so
                 * temporarily read it from gpu_info firmware. Should be dropped
                 * when DAL no longer needs it.
                if (adev->asic_type != CHIP_NAVI12)
        switch (adev->asic_type) {
                chip_name = "vega10";
                chip_name = "vega12";
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        chip_name = "raven2";
                else if (adev->apu_flags & AMD_APU_IS_PICASSO)
                        chip_name = "picasso";
                        chip_name = "raven";
                chip_name = "arcturus";
                chip_name = "navi12";
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
        err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
                        "Failed to load gpu_info firmware \"%s\"\n",
        err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
                        "Failed to validate gpu_info firmware \"%s\"\n",
        hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
        amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
        switch (hdr->version_major) {
                const struct gpu_info_firmware_v1_0 *gpu_info_fw =
                        (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                 * Should be dropped when DAL no longer needs it.
                if (adev->asic_type == CHIP_NAVI12)
                        goto parse_soc_bounding_box;
                adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
                adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
                adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
                adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
                adev->gfx.config.max_texture_channel_caches =
                        le32_to_cpu(gpu_info_fw->gc_num_tccs);
                adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
                adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
                adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
                adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
                adev->gfx.config.double_offchip_lds_buf =
                        le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
                adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
                adev->gfx.cu_info.max_waves_per_simd =
                        le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
                adev->gfx.cu_info.max_scratch_slots_per_cu =
                        le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
                adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
                if (hdr->version_minor >= 1) {
                        const struct gpu_info_firmware_v1_1 *gpu_info_fw =
                                (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
                                        le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        adev->gfx.config.num_sc_per_sh =
                                le32_to_cpu(gpu_info_fw->num_sc_per_sh);
                        adev->gfx.config.num_packer_per_sc =
                                le32_to_cpu(gpu_info_fw->num_packer_per_sc);
parse_soc_bounding_box:
                 * soc bounding box info is not integrated in discovery table,
                 * we always need to parse it from gpu info firmware if needed.
                if (hdr->version_minor == 2) {
                        const struct gpu_info_firmware_v1_2 *gpu_info_fw =
                                (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
                                        le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
                        "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
 * amdgpu_device_ip_early_init - run early init for hardware IPs
 * @adev: amdgpu_device pointer
 * Early initialization pass for hardware IPs. The hardware IPs that make
 * up each asic are discovered and each IP's early_init callback is run. This
 * is the first stage in initializing the asic.
 * Returns 0 on success, negative error code on failure.
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
        struct drm_device *dev = adev_to_drm(adev);
        struct pci_dev *parent;
        amdgpu_device_enable_virtual_display(adev);
        if (amdgpu_sriov_vf(adev)) {
                r = amdgpu_virt_request_full_gpu(adev, true);
        switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
                adev->family = AMDGPU_FAMILY_SI;
                r = si_set_ip_blocks(adev);
#ifdef CONFIG_DRM_AMDGPU_CIK
                if (adev->flags & AMD_IS_APU)
                        adev->family = AMDGPU_FAMILY_KV;
                        adev->family = AMDGPU_FAMILY_CI;
                r = cik_set_ip_blocks(adev);
        case CHIP_POLARIS10:
        case CHIP_POLARIS11:
        case CHIP_POLARIS12:
                if (adev->flags & AMD_IS_APU)
                        adev->family = AMDGPU_FAMILY_CZ;
                        adev->family = AMDGPU_FAMILY_VI;
                r = vi_set_ip_blocks(adev);
                r = amdgpu_discovery_set_ip_blocks(adev);
        if (amdgpu_has_atpx() &&
            (amdgpu_is_atpx_hybrid() ||
             amdgpu_has_atpx_dgpu_power_cntl()) &&
            ((adev->flags & AMD_IS_APU) == 0) &&
            !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
                adev->flags |= AMD_IS_PX;
        if (!(adev->flags & AMD_IS_APU)) {
                parent = pci_upstream_bridge(adev->pdev);
                adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
        amdgpu_amdkfd_device_probe(adev);
        adev->pm.pp_feature = amdgpu_pp_feature_mask;
        if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
                adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
        if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
                adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
                        DRM_ERROR("disabled ip block: %d <%s>\n",
                                  i, adev->ip_blocks[i].version->funcs->name);
                        adev->ip_blocks[i].status.valid = false;
                        if (adev->ip_blocks[i].version->funcs->early_init) {
                                r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
                                        adev->ip_blocks[i].status.valid = false;
                                        DRM_ERROR("early_init of IP block <%s> failed %d\n",
                                                  adev->ip_blocks[i].version->funcs->name, r);
                                        adev->ip_blocks[i].status.valid = true;
                                adev->ip_blocks[i].status.valid = true;
                /* get the vbios after the asic_funcs are set up */
                if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
                        r = amdgpu_device_parse_gpu_info_fw(adev);
                        if (!amdgpu_get_bios(adev))
                        r = amdgpu_atombios_init(adev);
                                dev_err(adev->dev, "amdgpu_atombios_init failed\n");
                                amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
        /* get pf2vf msg info at its earliest time */
        if (amdgpu_sriov_vf(adev))
                amdgpu_virt_init_data_exchange(adev);
        adev->cg_flags &= amdgpu_cg_mask;
        adev->pg_flags &= amdgpu_pg_mask;
static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.sw)
                if (adev->ip_blocks[i].status.hw)
                if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
                    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
                        r = adev->ip_blocks[i].version->funcs->hw_init(adev);
                                DRM_ERROR("hw_init of IP block <%s> failed %d\n",
                                          adev->ip_blocks[i].version->funcs->name, r);
                        adev->ip_blocks[i].status.hw = true;
static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.sw)
                if (adev->ip_blocks[i].status.hw)
                r = adev->ip_blocks[i].version->funcs->hw_init(adev);
                        DRM_ERROR("hw_init of IP block <%s> failed %d\n",
                                  adev->ip_blocks[i].version->funcs->name, r);
                adev->ip_blocks[i].status.hw = true;
2266 uint32_t smu_version;
2268 if (adev->asic_type >= CHIP_VEGA10) {
2269 for (i = 0; i < adev->num_ip_blocks; i++) {
2270 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2273 if (!adev->ip_blocks[i].status.sw)
2276 /* no need to do the fw loading again if already done*/
2277 if (adev->ip_blocks[i].status.hw == true)
2280 if (amdgpu_in_reset(adev) || adev->in_suspend) {
2281 r = adev->ip_blocks[i].version->funcs->resume(adev);
2283 DRM_ERROR("resume of IP block <%s> failed %d\n",
2284 adev->ip_blocks[i].version->funcs->name, r);
2288 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2290 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2291 adev->ip_blocks[i].version->funcs->name, r);
2296 adev->ip_blocks[i].status.hw = true;
2301 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2302 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2307 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2312 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2313 struct amdgpu_ring *ring = adev->rings[i];
2315 /* No need to setup the GPU scheduler for rings that don't need it */
2316 if (!ring || ring->no_scheduler)
2319 switch (ring->funcs->type) {
2320 case AMDGPU_RING_TYPE_GFX:
2321 timeout = adev->gfx_timeout;
2323 case AMDGPU_RING_TYPE_COMPUTE:
2324 timeout = adev->compute_timeout;
2326 case AMDGPU_RING_TYPE_SDMA:
2327 timeout = adev->sdma_timeout;
2330 timeout = adev->video_timeout;
2334 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2335 ring->num_hw_submission, amdgpu_job_hang_limit,
2336 timeout, adev->reset_domain->wq,
2337 ring->sched_score, ring->name,
2340 DRM_ERROR("Failed to create scheduler on ring %s.\n",
2351 * amdgpu_device_ip_init - run init for hardware IPs
2353 * @adev: amdgpu_device pointer
2355 * Main initialization pass for hardware IPs. The list of all the hardware
2356 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2357 * are run. sw_init initializes the software state associated with each IP
2358 * and hw_init initializes the hardware associated with each IP.
2359 * Returns 0 on success, negative error code on failure.
2361 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2365 r = amdgpu_ras_init(adev);
2369 for (i = 0; i < adev->num_ip_blocks; i++) {
2370 if (!adev->ip_blocks[i].status.valid)
2372 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2374 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2375 adev->ip_blocks[i].version->funcs->name, r);
2378 adev->ip_blocks[i].status.sw = true;
2380 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2381 /* need to do common hw init early so everything is set up for gmc */
2382 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2384 DRM_ERROR("hw_init %d failed %d\n", i, r);
2387 adev->ip_blocks[i].status.hw = true;
2388 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2389 /* need to do gmc hw init early so we can allocate gpu mem */
2390 /* Try to reserve bad pages early */
2391 if (amdgpu_sriov_vf(adev))
2392 amdgpu_virt_exchange_data(adev);
2394 r = amdgpu_device_mem_scratch_init(adev);
2396 DRM_ERROR("amdgpu_device_mem_scratch_init failed %d\n", r);
2399 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2401 DRM_ERROR("hw_init %d failed %d\n", i, r);
2404 r = amdgpu_device_wb_init(adev);
2406 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2409 adev->ip_blocks[i].status.hw = true;
2411 /* right after GMC hw init, we create CSA */
2413 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2414 AMDGPU_GEM_DOMAIN_VRAM |
2415 AMDGPU_GEM_DOMAIN_GTT,
2418 DRM_ERROR("allocate CSA failed %d\n", r);
2425 if (amdgpu_sriov_vf(adev))
2426 amdgpu_virt_init_data_exchange(adev);
2428 r = amdgpu_ib_pool_init(adev);
2430 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2431 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2435 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init completes */
2439 r = amdgpu_device_ip_hw_init_phase1(adev);
2443 r = amdgpu_device_fw_loading(adev);
2447 r = amdgpu_device_ip_hw_init_phase2(adev);
2452 * retired pages will be loaded from eeprom and reserved here;
2453 * it should be called after amdgpu_device_ip_hw_init_phase2 since
2454 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2455 * functional for I2C communication, which is only true at this point.
2457 * amdgpu_ras_recovery_init may fail, but the upper level only cares
2458 * about failures caused by a bad GPU state and stops the amdgpu init
2459 * process accordingly. For other failure cases, it still releases all
2460 * the resources and prints an error message rather than returning a
2461 * negative value to the upper level.
2463 * Note: theoretically, this should be called before all vram allocations
2464 * to keep retired pages from being reused.
2466 r = amdgpu_ras_recovery_init(adev);
2471 * In the case of XGMI, grab an extra reference to the reset domain for this device
2473 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2474 if (amdgpu_xgmi_add_device(adev) == 0) {
2475 if (!amdgpu_sriov_vf(adev)) {
2476 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2478 if (WARN_ON(!hive)) {
2483 if (!hive->reset_domain ||
2484 !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2486 amdgpu_put_xgmi_hive(hive);
2490 /* Drop the early temporary reset domain we created for device */
2491 amdgpu_reset_put_reset_domain(adev->reset_domain);
2492 adev->reset_domain = hive->reset_domain;
2493 amdgpu_put_xgmi_hive(hive);
2498 r = amdgpu_device_init_schedulers(adev);
2502 /* Don't init kfd if the whole hive needs to be reset during init */
2503 if (!adev->gmc.xgmi.pending_reset)
2504 amdgpu_amdkfd_device_init(adev);
2506 amdgpu_fru_get_product_info(adev);
2509 if (amdgpu_sriov_vf(adev))
2510 amdgpu_virt_release_full_gpu(adev, true);
2516 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2518 * @adev: amdgpu_device pointer
2520 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
2521 * this function before a GPU reset. If the value is retained after a
2522 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2524 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2526 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2530 * amdgpu_device_check_vram_lost - check if vram is valid
2532 * @adev: amdgpu_device pointer
2534 * Checks the reset magic value written to the gart pointer in VRAM.
2535 * The driver calls this after a GPU reset to see if the contents of
2536 * VRAM have been lost.
2537 * Returns true if VRAM is lost, false if not.
2539 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2541 if (memcmp(adev->gart.ptr, adev->reset_magic,
2542 AMDGPU_RESET_MAGIC_NUM))
2545 if (!amdgpu_in_reset(adev))
2549 * For all ASICs with baco/mode1 reset, the VRAM is
2550 * always assumed to be lost.
2552 switch (amdgpu_asic_reset_method(adev)) {
2553 case AMD_RESET_METHOD_BACO:
2554 case AMD_RESET_METHOD_MODE1:
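/*
 * Illustrative sketch only (not part of the driver): how the fill/check
 * pair above is meant to bracket a reset. The helper name is
 * hypothetical; the real callers are the reset paths later in this file.
 */
static void __maybe_unused amdgpu_device_example_vram_lost_check(struct amdgpu_device *adev)
{
	amdgpu_device_fill_reset_magic(adev);	/* stash the magic before the reset */
	amdgpu_asic_reset(adev);		/* any full ASIC reset */
	if (amdgpu_device_check_vram_lost(adev))
		DRM_WARN("VRAM contents were lost across the reset\n");
}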
2562 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2564 * @adev: amdgpu_device pointer
2565 * @state: clockgating state (gate or ungate)
2567 * The list of all the hardware IPs that make up the asic is walked and the
2568 * set_clockgating_state callbacks are run.
2569 * During late init this pass enables clockgating for hardware IPs;
2570 * during fini or suspend it disables clockgating for them.
2571 * Returns 0 on success, negative error code on failure.
2574 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2575 enum amd_clockgating_state state)
2579 if (amdgpu_emu_mode == 1)
2582 for (j = 0; j < adev->num_ip_blocks; j++) {
2583 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2584 if (!adev->ip_blocks[i].status.late_initialized)
2586 /* skip CG for GFX, SDMA on S0ix */
2587 if (adev->in_s0ix &&
2588 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2589 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2591 /* skip CG for VCE/UVD, it's handled specially */
2592 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2593 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2594 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2595 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2596 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2597 /* enable clockgating to save power */
2598 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2601 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2602 adev->ip_blocks[i].version->funcs->name, r);
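/*
 * Worked example of the traversal order used above (and mirrored by the
 * powergating variant below): with num_ip_blocks == 4, gating walks the
 * blocks forward (0, 1, 2, 3) while ungating walks them in reverse
 * (3, 2, 1, 0), so the gating order is unwound LIFO-style on the way out.
 */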
2611 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2612 enum amd_powergating_state state)
2616 if (amdgpu_emu_mode == 1)
2619 for (j = 0; j < adev->num_ip_blocks; j++) {
2620 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2621 if (!adev->ip_blocks[i].status.late_initialized)
2623 /* skip PG for GFX, SDMA on S0ix */
2624 if (adev->in_s0ix &&
2625 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2626 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2628 /* skip PG for VCE/UVD, it's handled specially */
2629 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2630 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2631 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2632 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2633 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2634 /* enable powergating to save power */
2635 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2638 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2639 adev->ip_blocks[i].version->funcs->name, r);
2647 static int amdgpu_device_enable_mgpu_fan_boost(void)
2649 struct amdgpu_gpu_instance *gpu_ins;
2650 struct amdgpu_device *adev;
2653 mutex_lock(&mgpu_info.mutex);
2656 * MGPU fan boost feature should be enabled
2657 * only when there are two or more dGPUs in the system.
2660 if (mgpu_info.num_dgpu < 2)
2663 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2664 gpu_ins = &(mgpu_info.gpu_ins[i]);
2665 adev = gpu_ins->adev;
2666 if (!(adev->flags & AMD_IS_APU) &&
2667 !gpu_ins->mgpu_fan_enabled) {
2668 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2672 gpu_ins->mgpu_fan_enabled = 1;
2677 mutex_unlock(&mgpu_info.mutex);
2683 * amdgpu_device_ip_late_init - run late init for hardware IPs
2685 * @adev: amdgpu_device pointer
2687 * Late initialization pass for hardware IPs. The list of all the hardware
2688 * IPs that make up the asic is walked and the late_init callbacks are run.
2689 * late_init covers any special initialization that an IP requires
2690 * after all of the other IPs have been initialized, or anything that
2691 * needs to happen late in the init process.
2692 * Returns 0 on success, negative error code on failure.
2694 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2696 struct amdgpu_gpu_instance *gpu_instance;
2699 for (i = 0; i < adev->num_ip_blocks; i++) {
2700 if (!adev->ip_blocks[i].status.hw)
2702 if (adev->ip_blocks[i].version->funcs->late_init) {
2703 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2705 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2706 adev->ip_blocks[i].version->funcs->name, r);
2710 adev->ip_blocks[i].status.late_initialized = true;
2713 r = amdgpu_ras_late_init(adev);
2715 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2719 amdgpu_ras_set_error_query_ready(adev, true);
2721 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2722 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2724 amdgpu_device_fill_reset_magic(adev);
2726 r = amdgpu_device_enable_mgpu_fan_boost();
2728 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2730 /* For passthrough configurations on arcturus and aldebaran, enable special SBR handling */
2731 if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2732 adev->asic_type == CHIP_ALDEBARAN))
2733 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2735 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2736 mutex_lock(&mgpu_info.mutex);
2739 * Reset the device p-state to low, as it was booted with high.
2741 * This should be performed only after all devices from the same
2742 * hive have been initialized.
2744 * However, the number of devices in a hive is not known in advance;
2745 * it is counted one by one as the devices initialize.
2747 * So we wait until all XGMI-interlinked devices have initialized.
2748 * This may bring some delays, as those devices may come from
2749 * different hives. But that should be OK.
2751 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2752 for (i = 0; i < mgpu_info.num_gpu; i++) {
2753 gpu_instance = &(mgpu_info.gpu_ins[i]);
2754 if (gpu_instance->adev->flags & AMD_IS_APU)
2757 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2758 AMDGPU_XGMI_PSTATE_MIN);
2760 DRM_ERROR("pstate setting failed (%d).\n", r);
2766 mutex_unlock(&mgpu_info.mutex);
2773 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2775 * @adev: amdgpu_device pointer
2777 * For ASICs that need to disable the SMC first
2779 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2783 if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2786 for (i = 0; i < adev->num_ip_blocks; i++) {
2787 if (!adev->ip_blocks[i].status.hw)
2789 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2790 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2791 /* XXX handle errors */
2793 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2794 adev->ip_blocks[i].version->funcs->name, r);
2796 adev->ip_blocks[i].status.hw = false;
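/*
 * Note on the IP_VERSION() check above: assuming the usual
 * (major << 16 | minor << 8 | rev) packing of discovered IP versions,
 * IP_VERSION(9, 0, 0) == 0x090000, so the early return skips this
 * workaround on all GC IP versions newer than 9.0.0.
 */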
2802 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2806 for (i = 0; i < adev->num_ip_blocks; i++) {
2807 if (!adev->ip_blocks[i].version->funcs->early_fini)
2810 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2812 DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2813 adev->ip_blocks[i].version->funcs->name, r);
2817 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2818 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2820 amdgpu_amdkfd_suspend(adev, false);
2822 /* Workaround for ASICs that need to disable the SMC first */
2823 amdgpu_device_smu_fini_early(adev);
2825 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2826 if (!adev->ip_blocks[i].status.hw)
2829 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2830 /* XXX handle errors */
2832 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2833 adev->ip_blocks[i].version->funcs->name, r);
2836 adev->ip_blocks[i].status.hw = false;
2839 if (amdgpu_sriov_vf(adev)) {
2840 if (amdgpu_virt_release_full_gpu(adev, false))
2841 DRM_ERROR("failed to release exclusive mode on fini\n");
2848 * amdgpu_device_ip_fini - run fini for hardware IPs
2850 * @adev: amdgpu_device pointer
2852 * Main teardown pass for hardware IPs. The list of all the hardware
2853 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2854 * are run. hw_fini tears down the hardware associated with each IP
2855 * and sw_fini tears down any software state associated with each IP.
2856 * Returns 0 on success, negative error code on failure.
2858 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2862 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2863 amdgpu_virt_release_ras_err_handler_data(adev);
2865 if (adev->gmc.xgmi.num_physical_nodes > 1)
2866 amdgpu_xgmi_remove_device(adev);
2868 amdgpu_amdkfd_device_fini_sw(adev);
2870 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2871 if (!adev->ip_blocks[i].status.sw)
2874 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2875 amdgpu_ucode_free_bo(adev);
2876 amdgpu_free_static_csa(&adev->virt.csa_obj);
2877 amdgpu_device_wb_fini(adev);
2878 amdgpu_device_mem_scratch_fini(adev);
2879 amdgpu_ib_pool_fini(adev);
2882 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2883 /* XXX handle errors */
2885 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2886 adev->ip_blocks[i].version->funcs->name, r);
2888 adev->ip_blocks[i].status.sw = false;
2889 adev->ip_blocks[i].status.valid = false;
2892 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2893 if (!adev->ip_blocks[i].status.late_initialized)
2895 if (adev->ip_blocks[i].version->funcs->late_fini)
2896 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2897 adev->ip_blocks[i].status.late_initialized = false;
2900 amdgpu_ras_fini(adev);
2906 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2908 * @work: work_struct.
2910 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2912 struct amdgpu_device *adev =
2913 container_of(work, struct amdgpu_device, delayed_init_work.work);
2916 r = amdgpu_ib_ring_tests(adev);
2918 DRM_ERROR("ib ring test failed (%d).\n", r);
2921 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2923 struct amdgpu_device *adev =
2924 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2926 WARN_ON_ONCE(adev->gfx.gfx_off_state);
2927 WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2929 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2930 adev->gfx.gfx_off_state = true;
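/*
 * Illustrative sketch only: the delayed work above runs once the last
 * GFXOFF disable request has been dropped. Callers bracket work that
 * needs the GFX core powered on with amdgpu_gfx_off_ctrl() (see
 * amdgpu_gfx.c), which adjusts gfx_off_req_count and (re)schedules the
 * delayed work. The helper name below is hypothetical.
 */
static void __maybe_unused amdgpu_device_example_gfxoff_bracket(struct amdgpu_device *adev)
{
	amdgpu_gfx_off_ctrl(adev, false);	/* keep GFX powered on */
	/* ... access GFX registers or state that must not race GFXOFF ... */
	amdgpu_gfx_off_ctrl(adev, true);	/* allow GFXOFF again */
}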
2934 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2936 * @adev: amdgpu_device pointer
2938 * Main suspend function for hardware IPs. The list of all the hardware
2939 * IPs that make up the asic is walked, clockgating is disabled and the
2940 * suspend callbacks are run. suspend puts the hardware and software state
2941 * in each IP into a state suitable for suspend.
2942 * Returns 0 on success, negative error code on failure.
2944 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2948 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2949 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2952 * Per the PMFW team's suggestion, the driver needs to disable the
2953 * gfxoff and df cstate features for gpu reset scenarios (e.g.
2954 * Mode1Reset). Add the missing df cstate disablement here.
2956 if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
2957 dev_warn(adev->dev, "Failed to disallow df cstate");
2959 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2960 if (!adev->ip_blocks[i].status.valid)
2963 /* displays are handled separately */
2964 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2967 /* XXX handle errors */
2968 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2969 /* XXX handle errors */
2971 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2972 adev->ip_blocks[i].version->funcs->name, r);
2976 adev->ip_blocks[i].status.hw = false;
2983 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2985 * @adev: amdgpu_device pointer
2987 * Main suspend function for hardware IPs. The list of all the hardware
2988 * IPs that make up the asic is walked, clockgating is disabled and the
2989 * suspend callbacks are run. suspend puts the hardware and software state
2990 * in each IP into a state suitable for suspend.
2991 * Returns 0 on success, negative error code on failure.
2993 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2998 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3000 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3001 if (!adev->ip_blocks[i].status.valid)
3003 /* displays are handled in phase1 */
3004 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3006 /* PSP lost connection when err_event_athub occurs */
3007 if (amdgpu_ras_intr_triggered() &&
3008 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3009 adev->ip_blocks[i].status.hw = false;
3013 /* skip unnecessary suspend if we have not initialized them yet */
3014 if (adev->gmc.xgmi.pending_reset &&
3015 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3016 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3017 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3018 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3019 adev->ip_blocks[i].status.hw = false;
3023 /* skip suspend of gfx/mes and psp for S0ix
3024 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3025 * like at runtime. PSP is also part of the always on hardware
3026 * so no need to suspend it.
3028 if (adev->in_s0ix &&
3029 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3030 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3031 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3034 /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3035 if (adev->in_s0ix &&
3036 (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) &&
3037 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
3040 /* XXX handle errors */
3041 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3042 /* XXX handle errors */
3044 DRM_ERROR("suspend of IP block <%s> failed %d\n",
3045 adev->ip_blocks[i].version->funcs->name, r);
3047 adev->ip_blocks[i].status.hw = false;
3048 /* handle putting the SMC in the appropriate state */
3049 if (!amdgpu_sriov_vf(adev)) {
3050 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3051 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3053 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3054 adev->mp1_state, r);
3065 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3067 * @adev: amdgpu_device pointer
3069 * Main suspend function for hardware IPs. The list of all the hardware
3070 * IPs that make up the asic is walked, clockgating is disabled and the
3071 * suspend callbacks are run. suspend puts the hardware and software state
3072 * in each IP into a state suitable for suspend.
3073 * Returns 0 on success, negative error code on failure.
3075 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3079 if (amdgpu_sriov_vf(adev)) {
3080 amdgpu_virt_fini_data_exchange(adev);
3081 amdgpu_virt_request_full_gpu(adev, false);
3084 r = amdgpu_device_ip_suspend_phase1(adev);
3087 r = amdgpu_device_ip_suspend_phase2(adev);
3089 if (amdgpu_sriov_vf(adev))
3090 amdgpu_virt_release_full_gpu(adev, false);
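/*
 * Note the SR-IOV bracketing above: a VF must hold full (exclusive)
 * access to the GPU while it suspends the IP blocks, so the two
 * suspend phases are wrapped in request_full_gpu()/release_full_gpu().
 */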
3095 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3099 static enum amd_ip_block_type ip_order[] = {
3100 AMD_IP_BLOCK_TYPE_COMMON,
3101 AMD_IP_BLOCK_TYPE_GMC,
3102 AMD_IP_BLOCK_TYPE_PSP,
3103 AMD_IP_BLOCK_TYPE_IH,
3106 for (i = 0; i < adev->num_ip_blocks; i++) {
3108 struct amdgpu_ip_block *block;
3110 block = &adev->ip_blocks[i];
3111 block->status.hw = false;
3113 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3115 if (block->version->type != ip_order[j] ||
3116 !block->status.valid)
3119 r = block->version->funcs->hw_init(adev);
3120 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3123 block->status.hw = true;
3130 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3134 static enum amd_ip_block_type ip_order[] = {
3135 AMD_IP_BLOCK_TYPE_SMC,
3136 AMD_IP_BLOCK_TYPE_DCE,
3137 AMD_IP_BLOCK_TYPE_GFX,
3138 AMD_IP_BLOCK_TYPE_SDMA,
3139 AMD_IP_BLOCK_TYPE_UVD,
3140 AMD_IP_BLOCK_TYPE_VCE,
3141 AMD_IP_BLOCK_TYPE_VCN
3144 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3146 struct amdgpu_ip_block *block;
3148 for (j = 0; j < adev->num_ip_blocks; j++) {
3149 block = &adev->ip_blocks[j];
3151 if (block->version->type != ip_order[i] ||
3152 !block->status.valid ||
3156 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3157 r = block->version->funcs->resume(adev);
3159 r = block->version->funcs->hw_init(adev);
3161 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3164 block->status.hw = true;
3172 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3174 * @adev: amdgpu_device pointer
3176 * First resume function for hardware IPs. The list of all the hardware
3177 * IPs that make up the asic is walked and the resume callbacks are run for
3178 * COMMON, GMC, and IH. resume puts the hardware into a functional state
3179 * after a suspend and updates the software state as necessary. This
3180 * function is also used for restoring the GPU after a GPU reset.
3181 * Returns 0 on success, negative error code on failure.
3183 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3187 for (i = 0; i < adev->num_ip_blocks; i++) {
3188 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3190 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3191 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3192 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3193 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3195 r = adev->ip_blocks[i].version->funcs->resume(adev);
3197 DRM_ERROR("resume of IP block <%s> failed %d\n",
3198 adev->ip_blocks[i].version->funcs->name, r);
3201 adev->ip_blocks[i].status.hw = true;
3209 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3211 * @adev: amdgpu_device pointer
3213 * Second resume function for hardware IPs. The list of all the hardware
3214 * IPs that make up the asic is walked and the resume callbacks are run for
3215 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
3216 * functional state after a suspend and updates the software state as
3217 * necessary. This function is also used for restoring the GPU after a GPU
3218 * reset.
3219 * Returns 0 on success, negative error code on failure.
3221 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3225 for (i = 0; i < adev->num_ip_blocks; i++) {
3226 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3228 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3229 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3230 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3231 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3233 r = adev->ip_blocks[i].version->funcs->resume(adev);
3235 DRM_ERROR("resume of IP block <%s> failed %d\n",
3236 adev->ip_blocks[i].version->funcs->name, r);
3239 adev->ip_blocks[i].status.hw = true;
3246 * amdgpu_device_ip_resume - run resume for hardware IPs
3248 * @adev: amdgpu_device pointer
3250 * Main resume function for hardware IPs. The hardware IPs
3251 * are split into two resume functions because they are
3252 * also used in recovering from a GPU reset, and some additional
3253 * steps need to be taken between them. In this case (S3/S4) they are
3254 * run sequentially.
3255 * Returns 0 on success, negative error code on failure.
3257 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3261 r = amdgpu_amdkfd_resume_iommu(adev);
3265 r = amdgpu_device_ip_resume_phase1(adev);
3269 r = amdgpu_device_fw_loading(adev);
3273 r = amdgpu_device_ip_resume_phase2(adev);
3279 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3281 * @adev: amdgpu_device pointer
3283 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3285 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3287 if (amdgpu_sriov_vf(adev)) {
3288 if (adev->is_atom_fw) {
3289 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3290 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3292 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3293 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3296 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3297 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3302 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3304 * @asic_type: AMD asic type
3306 * Check if there is DC (new modesetting infrastructure) support for an asic.
3307 * returns true if DC has support, false if not.
3309 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3311 switch (asic_type) {
3312 #ifdef CONFIG_DRM_AMDGPU_SI
3316 /* chips with no display hardware */
3318 #if defined(CONFIG_DRM_AMD_DC)
3324 * We have systems in the wild with these ASICs that require
3325 * LVDS and VGA support, which is not supported with DC.
3327 * Fall back to the non-DC driver here by default so as not to
3328 * cause regressions.
3330 #if defined(CONFIG_DRM_AMD_DC_SI)
3331 return amdgpu_dc > 0;
3340 * We have systems in the wild with these ASICs that require
3341 * VGA support, which is not supported with DC.
3343 * Fall back to the non-DC driver here by default so as not to
3344 * cause regressions.
3346 return amdgpu_dc > 0;
3348 return amdgpu_dc != 0;
3352 DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3353 "but isn't supported by ASIC, ignoring\n");
3360 * amdgpu_device_has_dc_support - check if dc is supported
3362 * @adev: amdgpu_device pointer
3364 * Returns true for supported, false for not supported
3366 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3368 if (adev->enable_virtual_display ||
3369 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3372 return amdgpu_device_asic_has_dc_support(adev->asic_type);
3375 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3377 struct amdgpu_device *adev =
3378 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3379 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3381 /* It's a bug to not have a hive within this function */
3386 * Use task barrier to synchronize all xgmi reset works across the
3387 * hive. task_barrier_enter and task_barrier_exit will block
3388 * until all the threads running the xgmi reset works reach
3389 * those points. task_barrier_full will do both blocks.
3391 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3393 task_barrier_enter(&hive->tb);
3394 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3396 if (adev->asic_reset_res)
3399 task_barrier_exit(&hive->tb);
3400 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3402 if (adev->asic_reset_res)
3405 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3406 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3407 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3410 task_barrier_full(&hive->tb);
3411 adev->asic_reset_res = amdgpu_asic_reset(adev);
3415 if (adev->asic_reset_res)
3416 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3417 adev->asic_reset_res, adev_to_drm(adev)->unique);
3418 amdgpu_put_xgmi_hive(hive);
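/*
 * Minimal sketch of the task barrier pattern used above, assuming a
 * hive whose tb was set up with task_barrier_init() and one
 * task_barrier_add_task() per participating worker (see
 * drm/task_barrier.h). The function name is hypothetical.
 */
static void __maybe_unused amdgpu_device_example_synced_step(struct amdgpu_hive_info *hive)
{
	task_barrier_enter(&hive->tb);	/* block until every worker arrives */
	/* ... per-device step that all workers must start together ... */
	task_barrier_exit(&hive->tb);	/* block until every worker is done */
}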
3421 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3423 char *input = amdgpu_lockup_timeout;
3424 char *timeout_setting = NULL;
3430 * By default the timeout for non-compute jobs is 10000 ms
3431 * and 60000 ms for compute jobs.
3432 * In SR-IOV or passthrough mode, the timeout for compute
3433 * jobs is 60000 ms by default.
3435 adev->gfx_timeout = msecs_to_jiffies(10000);
3436 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3437 if (amdgpu_sriov_vf(adev))
3438 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3439 msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3440 else
3441 adev->compute_timeout = msecs_to_jiffies(60000);
3443 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3444 while ((timeout_setting = strsep(&input, ",")) &&
3445 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3446 ret = kstrtol(timeout_setting, 0, &timeout);
3453 } else if (timeout < 0) {
3454 timeout = MAX_SCHEDULE_TIMEOUT;
3455 dev_warn(adev->dev, "lockup timeout disabled");
3456 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3458 timeout = msecs_to_jiffies(timeout);
3463 adev->gfx_timeout = timeout;
3466 adev->compute_timeout = timeout;
3469 adev->sdma_timeout = timeout;
3472 adev->video_timeout = timeout;
3479 * There is only one value specified and
3480 * it should apply to all non-compute jobs.
3483 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3484 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3485 adev->compute_timeout = adev->gfx_timeout;
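/*
 * Usage sketch (assumption: the comma-separated values map onto the
 * switch cases above in order, i.e. gfx, compute, sdma, video):
 *
 *	modprobe amdgpu lockup_timeout=10000,60000,10000,10000
 *
 * A single value applies to all non-compute jobs, and a negative value
 * disables the lockup timeout entirely.
 */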
3493 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3495 * @adev: amdgpu_device pointer
3497 * RAM is direct-mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3499 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3501 struct iommu_domain *domain;
3503 domain = iommu_get_domain_for_dev(adev->dev);
3504 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3505 adev->ram_is_direct_mapped = true;
3508 static const struct attribute *amdgpu_dev_attributes[] = {
3509 &dev_attr_product_name.attr,
3510 &dev_attr_product_number.attr,
3511 &dev_attr_serial_number.attr,
3512 &dev_attr_pcie_replay_count.attr,
3517 * amdgpu_device_init - initialize the driver
3519 * @adev: amdgpu_device pointer
3520 * @flags: driver flags
3522 * Initializes the driver info and hw (all asics).
3523 * Returns 0 for success or an error on failure.
3524 * Called at driver startup.
3526 int amdgpu_device_init(struct amdgpu_device *adev,
3529 struct drm_device *ddev = adev_to_drm(adev);
3530 struct pci_dev *pdev = adev->pdev;
3535 adev->shutdown = false;
3536 adev->flags = flags;
3538 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3539 adev->asic_type = amdgpu_force_asic_type;
3541 adev->asic_type = flags & AMD_ASIC_MASK;
3543 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3544 if (amdgpu_emu_mode == 1)
3545 adev->usec_timeout *= 10;
3546 adev->gmc.gart_size = 512 * 1024 * 1024;
3547 adev->accel_working = false;
3548 adev->num_rings = 0;
3549 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3550 adev->mman.buffer_funcs = NULL;
3551 adev->mman.buffer_funcs_ring = NULL;
3552 adev->vm_manager.vm_pte_funcs = NULL;
3553 adev->vm_manager.vm_pte_num_scheds = 0;
3554 adev->gmc.gmc_funcs = NULL;
3555 adev->harvest_ip_mask = 0x0;
3556 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3557 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3559 adev->smc_rreg = &amdgpu_invalid_rreg;
3560 adev->smc_wreg = &amdgpu_invalid_wreg;
3561 adev->pcie_rreg = &amdgpu_invalid_rreg;
3562 adev->pcie_wreg = &amdgpu_invalid_wreg;
3563 adev->pciep_rreg = &amdgpu_invalid_rreg;
3564 adev->pciep_wreg = &amdgpu_invalid_wreg;
3565 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3566 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3567 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3568 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3569 adev->didt_rreg = &amdgpu_invalid_rreg;
3570 adev->didt_wreg = &amdgpu_invalid_wreg;
3571 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3572 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3573 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3574 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3576 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3577 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3578 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3580 /* mutex initialization is all done here so we
3581 * can call these functions without locking issues */
3582 mutex_init(&adev->firmware.mutex);
3583 mutex_init(&adev->pm.mutex);
3584 mutex_init(&adev->gfx.gpu_clock_mutex);
3585 mutex_init(&adev->srbm_mutex);
3586 mutex_init(&adev->gfx.pipe_reserve_mutex);
3587 mutex_init(&adev->gfx.gfx_off_mutex);
3588 mutex_init(&adev->grbm_idx_mutex);
3589 mutex_init(&adev->mn_lock);
3590 mutex_init(&adev->virt.vf_errors.lock);
3591 hash_init(adev->mn_hash);
3592 mutex_init(&adev->psp.mutex);
3593 mutex_init(&adev->notifier_lock);
3594 mutex_init(&adev->pm.stable_pstate_ctx_lock);
3595 mutex_init(&adev->benchmark_mutex);
3597 amdgpu_device_init_apu_flags(adev);
3599 r = amdgpu_device_check_arguments(adev);
3603 spin_lock_init(&adev->mmio_idx_lock);
3604 spin_lock_init(&adev->smc_idx_lock);
3605 spin_lock_init(&adev->pcie_idx_lock);
3606 spin_lock_init(&adev->uvd_ctx_idx_lock);
3607 spin_lock_init(&adev->didt_idx_lock);
3608 spin_lock_init(&adev->gc_cac_idx_lock);
3609 spin_lock_init(&adev->se_cac_idx_lock);
3610 spin_lock_init(&adev->audio_endpt_idx_lock);
3611 spin_lock_init(&adev->mm_stats.lock);
3613 INIT_LIST_HEAD(&adev->shadow_list);
3614 mutex_init(&adev->shadow_list_lock);
3616 INIT_LIST_HEAD(&adev->reset_list);
3618 INIT_LIST_HEAD(&adev->ras_list);
3620 INIT_DELAYED_WORK(&adev->delayed_init_work,
3621 amdgpu_device_delayed_init_work_handler);
3622 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3623 amdgpu_device_delay_enable_gfx_off);
3625 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3627 adev->gfx.gfx_off_req_count = 1;
3628 adev->gfx.gfx_off_residency = 0;
3629 adev->gfx.gfx_off_entrycount = 0;
3630 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3632 atomic_set(&adev->throttling_logging_enabled, 1);
3634 * If throttling continues, logging will be performed every minute
3635 * to avoid log flooding. "-1" is subtracted since the thermal
3636 * throttling interrupt comes every second. Thus, the total logging
3637 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3638 * for the throttling interrupt) = 60 seconds.
3640 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3641 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3643 /* Registers mapping */
3644 /* TODO: block userspace mapping of io register */
3645 if (adev->asic_type >= CHIP_BONAIRE) {
3646 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3647 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3648 } else {
3649 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3650 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3653 for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3654 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3656 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3657 if (adev->rmmio == NULL) {
3660 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3661 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3663 amdgpu_device_get_pcie_info(adev);
3666 DRM_INFO("MCBP is enabled\n");
3669 * The reset domain needs to be present early, before any XGMI hive is
3670 * discovered (if any) and initialized, so the reset sem and in_gpu_reset
3671 * flag can be used early during init and before calling RREG32.
3673 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3674 if (!adev->reset_domain)
3677 /* detect hw virtualization here */
3678 amdgpu_detect_virtualization(adev);
3680 r = amdgpu_device_get_job_timeout_settings(adev);
3682 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3686 /* early init functions */
3687 r = amdgpu_device_ip_early_init(adev);
3691 /* Enable TMZ based on IP_VERSION */
3692 amdgpu_gmc_tmz_set(adev);
3694 amdgpu_gmc_noretry_set(adev);
3695 /* Need to get XGMI info early to decide the reset behavior */
3696 if (adev->gmc.xgmi.supported) {
3697 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3702 /* enable PCIE atomic ops */
3703 if (amdgpu_sriov_vf(adev))
3704 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3705 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3706 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3707 else
3708 adev->have_atomics_support =
3709 !pci_enable_atomic_ops_to_root(adev->pdev,
3710 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3711 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3712 if (!adev->have_atomics_support)
3713 dev_info(adev->dev, "PCIe atomic ops are not supported\n");
3715 /* doorbell bar mapping and doorbell index init */
3716 amdgpu_device_doorbell_init(adev);
3718 if (amdgpu_emu_mode == 1) {
3719 /* post the asic on emulation mode */
3720 emu_soc_asic_init(adev);
3721 goto fence_driver_init;
3724 amdgpu_reset_init(adev);
3726 /* detect if we are with an SRIOV vbios */
3727 amdgpu_device_detect_sriov_bios(adev);
3729 /* check if we need to reset the asic
3730 * E.g., driver was not cleanly unloaded previously, etc.
3732 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3733 if (adev->gmc.xgmi.num_physical_nodes) {
3734 dev_info(adev->dev, "Pending hive reset.\n");
3735 adev->gmc.xgmi.pending_reset = true;
3736 /* Only init the blocks necessary for the SMU to handle the reset */
3737 for (i = 0; i < adev->num_ip_blocks; i++) {
3738 if (!adev->ip_blocks[i].status.valid)
3740 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3741 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3742 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3743 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3744 DRM_DEBUG("IP %s disabled for hw_init.\n",
3745 adev->ip_blocks[i].version->funcs->name);
3746 adev->ip_blocks[i].status.hw = true;
3750 r = amdgpu_asic_reset(adev);
3752 dev_err(adev->dev, "asic reset on init failed\n");
3758 pci_enable_pcie_error_reporting(adev->pdev);
3760 /* Post card if necessary */
3761 if (amdgpu_device_need_post(adev)) {
3763 dev_err(adev->dev, "no vBIOS found\n");
3767 DRM_INFO("GPU posting now...\n");
3768 r = amdgpu_device_asic_init(adev);
3770 dev_err(adev->dev, "gpu post error!\n");
3775 if (adev->is_atom_fw) {
3776 /* Initialize clocks */
3777 r = amdgpu_atomfirmware_get_clock_info(adev);
3779 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3780 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3784 /* Initialize clocks */
3785 r = amdgpu_atombios_get_clock_info(adev);
3787 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3788 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3791 /* init i2c buses */
3792 if (!amdgpu_device_has_dc_support(adev))
3793 amdgpu_atombios_i2c_init(adev);
3798 r = amdgpu_fence_driver_sw_init(adev);
3800 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3801 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3805 /* init the mode config */
3806 drm_mode_config_init(adev_to_drm(adev));
3808 r = amdgpu_device_ip_init(adev);
3810 /* failed in exclusive mode due to timeout */
3811 if (amdgpu_sriov_vf(adev) &&
3812 !amdgpu_sriov_runtime(adev) &&
3813 amdgpu_virt_mmio_blocked(adev) &&
3814 !amdgpu_virt_wait_reset(adev)) {
3815 dev_err(adev->dev, "VF exclusive mode timeout\n");
3816 /* Don't send request since VF is inactive. */
3817 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3818 adev->virt.ops = NULL;
3820 goto release_ras_con;
3822 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3823 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3824 goto release_ras_con;
3827 amdgpu_fence_driver_hw_init(adev);
3830 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3831 adev->gfx.config.max_shader_engines,
3832 adev->gfx.config.max_sh_per_se,
3833 adev->gfx.config.max_cu_per_sh,
3834 adev->gfx.cu_info.number);
3836 adev->accel_working = true;
3838 amdgpu_vm_check_compute_bug(adev);
3840 /* Initialize the buffer migration limit. */
3841 if (amdgpu_moverate >= 0)
3842 max_MBps = amdgpu_moverate;
3843 else
3844 max_MBps = 8; /* Allow 8 MB/s. */
3845 /* Get a log2 for easy divisions. */
3846 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
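	/*
	 * Worked example (illustrative): the 8 MB/s fallback above gives
	 * log2_max_MBps = ilog2(8) = 3, so later throttling math can
	 * replace a division by 8 with a shift by 3.
	 */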
3848 r = amdgpu_pm_sysfs_init(adev);
3850 adev->pm_sysfs_en = false;
3851 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3853 adev->pm_sysfs_en = true;
3855 r = amdgpu_ucode_sysfs_init(adev);
3857 adev->ucode_sysfs_en = false;
3858 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3860 adev->ucode_sysfs_en = true;
3862 r = amdgpu_psp_sysfs_init(adev);
3864 adev->psp_sysfs_en = false;
3865 if (!amdgpu_sriov_vf(adev))
3866 DRM_ERROR("Creating psp sysfs failed\n");
3868 adev->psp_sysfs_en = true;
3871 * Register the gpu instance before amdgpu_device_enable_mgpu_fan_boost;
3872 * otherwise the mgpu fan boost feature will be skipped because the
3873 * gpu instance count comes up short.
3875 amdgpu_register_gpu_instance(adev);
3877 /* enable clockgating, etc. after ib tests, etc. since some blocks require
3878 * explicit gating rather than handling it automatically.
3880 if (!adev->gmc.xgmi.pending_reset) {
3881 r = amdgpu_device_ip_late_init(adev);
3883 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3884 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3885 goto release_ras_con;
3888 amdgpu_ras_resume(adev);
3889 queue_delayed_work(system_wq, &adev->delayed_init_work,
3890 msecs_to_jiffies(AMDGPU_RESUME_MS));
3893 if (amdgpu_sriov_vf(adev))
3894 flush_delayed_work(&adev->delayed_init_work);
3896 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3898 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3900 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3901 r = amdgpu_pmu_init(adev);
3903 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3905 /* Keep the stored PCI config space at hand for restore after a sudden PCI error */
3906 if (amdgpu_device_cache_pci_state(adev->pdev))
3907 pci_restore_state(pdev);
3909 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3910 /* this will fail for cards that aren't VGA class devices, just ignore it */
3912 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3913 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3915 if (amdgpu_device_supports_px(ddev)) {
3917 vga_switcheroo_register_client(adev->pdev,
3918 &amdgpu_switcheroo_ops, px);
3919 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3922 if (adev->gmc.xgmi.pending_reset)
3923 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3924 msecs_to_jiffies(AMDGPU_RESUME_MS));
3926 amdgpu_device_check_iommu_direct_map(adev);
3931 amdgpu_release_ras_context(adev);
3934 amdgpu_vf_error_trans_all(adev);
3939 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3942 /* Clear all CPU mappings pointing to this device */
3943 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3945 /* Unmap all mapped bars - Doorbell, registers and VRAM */
3946 amdgpu_device_doorbell_fini(adev);
3948 iounmap(adev->rmmio);
3950 if (adev->mman.aper_base_kaddr)
3951 iounmap(adev->mman.aper_base_kaddr);
3952 adev->mman.aper_base_kaddr = NULL;
3954 /* Memory manager related */
3955 if (!adev->gmc.xgmi.connected_to_cpu) {
3956 arch_phys_wc_del(adev->gmc.vram_mtrr);
3957 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3962 * amdgpu_device_fini_hw - tear down the driver
3964 * @adev: amdgpu_device pointer
3966 * Tear down the driver info (all asics).
3967 * Called at driver shutdown.
3969 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3971 dev_info(adev->dev, "amdgpu: finishing device.\n");
3972 flush_delayed_work(&adev->delayed_init_work);
3973 adev->shutdown = true;
3975 /* make sure the IB tests have finished before entering exclusive mode
3976 * to avoid preemption during the IB tests
3977 */
3978 if (amdgpu_sriov_vf(adev)) {
3979 amdgpu_virt_request_full_gpu(adev, false);
3980 amdgpu_virt_fini_data_exchange(adev);
3983 /* disable all interrupts */
3984 amdgpu_irq_disable_all(adev);
3985 if (adev->mode_info.mode_config_initialized) {
3986 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3987 drm_helper_force_disable_all(adev_to_drm(adev));
3989 drm_atomic_helper_shutdown(adev_to_drm(adev));
3991 amdgpu_fence_driver_hw_fini(adev);
3993 if (adev->mman.initialized)
3994 drain_workqueue(adev->mman.bdev.wq);
3996 if (adev->pm_sysfs_en)
3997 amdgpu_pm_sysfs_fini(adev);
3998 if (adev->ucode_sysfs_en)
3999 amdgpu_ucode_sysfs_fini(adev);
4000 if (adev->psp_sysfs_en)
4001 amdgpu_psp_sysfs_fini(adev);
4002 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4004 /* RAS features must be disabled before hw fini */
4005 amdgpu_ras_pre_fini(adev);
4007 amdgpu_device_ip_fini_early(adev);
4009 amdgpu_irq_fini_hw(adev);
4011 if (adev->mman.initialized)
4012 ttm_device_clear_dma_mappings(&adev->mman.bdev);
4014 amdgpu_gart_dummy_page_fini(adev);
4016 amdgpu_device_unmap_mmio(adev);
4020 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4024 amdgpu_fence_driver_sw_fini(adev);
4025 amdgpu_device_ip_fini(adev);
4026 release_firmware(adev->firmware.gpu_info_fw);
4027 adev->firmware.gpu_info_fw = NULL;
4028 adev->accel_working = false;
4029 dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4031 amdgpu_reset_fini(adev);
4033 /* free i2c buses */
4034 if (!amdgpu_device_has_dc_support(adev))
4035 amdgpu_i2c_fini(adev);
4037 if (amdgpu_emu_mode != 1)
4038 amdgpu_atombios_fini(adev);
4042 if (amdgpu_device_supports_px(adev_to_drm(adev))) {
4043 vga_switcheroo_unregister_client(adev->pdev);
4044 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4046 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4047 vga_client_unregister(adev->pdev);
4049 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4051 iounmap(adev->rmmio);
4053 amdgpu_device_doorbell_fini(adev);
4057 if (IS_ENABLED(CONFIG_PERF_EVENTS))
4058 amdgpu_pmu_fini(adev);
4059 if (adev->mman.discovery_bin)
4060 amdgpu_discovery_fini(adev);
4062 amdgpu_reset_put_reset_domain(adev->reset_domain);
4063 adev->reset_domain = NULL;
4065 kfree(adev->pci_state);
4070 * amdgpu_device_evict_resources - evict device resources
4071 * @adev: amdgpu device object
4073 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4074 * of the vram memory type. Mainly used for evicting device resources
4075 * at suspend time.
4078 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4082 /* No need to evict vram on APUs for suspend to ram or s2idle */
4083 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4086 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4088 DRM_WARN("evicting device resources failed\n");
4096 * amdgpu_device_suspend - initiate device suspend
4098 * @dev: drm dev pointer
4099 * @fbcon: notify the fbdev of suspend
4101 * Puts the hw in the suspend state (all asics).
4102 * Returns 0 for success or an error on failure.
4103 * Called at driver suspend.
4105 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4107 struct amdgpu_device *adev = drm_to_adev(dev);
4110 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4113 adev->in_suspend = true;
4115 /* Evict the majority of BOs before grabbing the full access */
4116 r = amdgpu_device_evict_resources(adev);
4120 if (amdgpu_sriov_vf(adev)) {
4121 amdgpu_virt_fini_data_exchange(adev);
4122 r = amdgpu_virt_request_full_gpu(adev, false);
4127 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4128 DRM_WARN("smart shift update failed\n");
4130 drm_kms_helper_poll_disable(dev);
4133 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4135 cancel_delayed_work_sync(&adev->delayed_init_work);
4137 amdgpu_ras_suspend(adev);
4139 amdgpu_device_ip_suspend_phase1(adev);
4142 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4144 r = amdgpu_device_evict_resources(adev);
4148 amdgpu_fence_driver_hw_fini(adev);
4150 amdgpu_device_ip_suspend_phase2(adev);
4152 if (amdgpu_sriov_vf(adev))
4153 amdgpu_virt_release_full_gpu(adev, false);
4159 * amdgpu_device_resume - initiate device resume
4161 * @dev: drm dev pointer
4162 * @fbcon: notify the fbdev of resume
4164 * Bring the hw back to operating state (all asics).
4165 * Returns 0 for success or an error on failure.
4166 * Called at driver resume.
4168 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4170 struct amdgpu_device *adev = drm_to_adev(dev);
4173 if (amdgpu_sriov_vf(adev)) {
4174 r = amdgpu_virt_request_full_gpu(adev, true);
4179 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4183 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4186 if (amdgpu_device_need_post(adev)) {
4187 r = amdgpu_device_asic_init(adev);
4189 dev_err(adev->dev, "amdgpu asic init failed\n");
4192 r = amdgpu_device_ip_resume(adev);
4195 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4198 amdgpu_fence_driver_hw_init(adev);
4200 r = amdgpu_device_ip_late_init(adev);
4204 queue_delayed_work(system_wq, &adev->delayed_init_work,
4205 msecs_to_jiffies(AMDGPU_RESUME_MS));
4207 if (!adev->in_s0ix) {
4208 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4214 if (amdgpu_sriov_vf(adev)) {
4215 amdgpu_virt_init_data_exchange(adev);
4216 amdgpu_virt_release_full_gpu(adev, true);
4222 /* Make sure IB tests flushed */
4223 flush_delayed_work(&adev->delayed_init_work);
4226 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4228 drm_kms_helper_poll_enable(dev);
4230 amdgpu_ras_resume(adev);
4232 if (adev->mode_info.num_crtc) {
4234 * Most of the connector probing functions try to acquire runtime pm
4235 * refs to ensure that the GPU is powered on when connector polling is
4236 * performed. Since we're calling this from a runtime PM callback,
4237 * trying to acquire rpm refs will cause us to deadlock.
4239 * Since we're guaranteed to be holding the rpm lock, it's safe to
4240 * temporarily disable the rpm helpers so this doesn't deadlock us.
4243 dev->dev->power.disable_depth++;
4245 if (!adev->dc_enabled)
4246 drm_helper_hpd_irq_event(dev);
4248 drm_kms_helper_hotplug_event(dev);
4250 dev->dev->power.disable_depth--;
4253 adev->in_suspend = false;
4255 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4256 DRM_WARN("smart shift update failed\n");
4262 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4264 * @adev: amdgpu_device pointer
4266 * The list of all the hardware IPs that make up the asic is walked and
4267 * the check_soft_reset callbacks are run. check_soft_reset determines
4268 * if the asic is still hung or not.
4269 * Returns true if any of the IPs are still in a hung state, false if not.
4271 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4274 bool asic_hang = false;
4276 if (amdgpu_sriov_vf(adev))
4279 if (amdgpu_asic_need_full_reset(adev))
4282 for (i = 0; i < adev->num_ip_blocks; i++) {
4283 if (!adev->ip_blocks[i].status.valid)
4285 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4286 adev->ip_blocks[i].status.hang =
4287 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4288 if (adev->ip_blocks[i].status.hang) {
4289 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4297 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4299 * @adev: amdgpu_device pointer
4301 * The list of all the hardware IPs that make up the asic is walked and the
4302 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
4303 * handles any IP specific hardware or software state changes that are
4304 * necessary for a soft reset to succeed.
4305 * Returns 0 on success, negative error code on failure.
4307 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4311 for (i = 0; i < adev->num_ip_blocks; i++) {
4312 if (!adev->ip_blocks[i].status.valid)
4314 if (adev->ip_blocks[i].status.hang &&
4315 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4316 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4326 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4328 * @adev: amdgpu_device pointer
4330 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
4331 * reset is necessary to recover.
4332 * Returns true if a full asic reset is required, false if not.
4334 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4338 if (amdgpu_asic_need_full_reset(adev))
4341 for (i = 0; i < adev->num_ip_blocks; i++) {
4342 if (!adev->ip_blocks[i].status.valid)
4344 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4345 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4346 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4347 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4348 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4349 if (adev->ip_blocks[i].status.hang) {
4350 dev_info(adev->dev, "Some blocks need a full reset!\n");
4359 * amdgpu_device_ip_soft_reset - do a soft reset
4361 * @adev: amdgpu_device pointer
4363 * The list of all the hardware IPs that make up the asic is walked and the
4364 * soft_reset callbacks are run if the block is hung. soft_reset handles any
4365 * IP specific hardware or software state changes that are necessary to soft
4366 * reset the IP.
4367 * Returns 0 on success, negative error code on failure.
4369 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4373 for (i = 0; i < adev->num_ip_blocks; i++) {
4374 if (!adev->ip_blocks[i].status.valid)
4376 if (adev->ip_blocks[i].status.hang &&
4377 adev->ip_blocks[i].version->funcs->soft_reset) {
4378 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4388 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4390 * @adev: amdgpu_device pointer
4392 * The list of all the hardware IPs that make up the asic is walked and the
4393 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
4394 * handles any IP specific hardware or software state changes that are
4395 * necessary after the IP has been soft reset.
4396 * Returns 0 on success, negative error code on failure.
4398 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4402 for (i = 0; i < adev->num_ip_blocks; i++) {
4403 if (!adev->ip_blocks[i].status.valid)
4405 if (adev->ip_blocks[i].status.hang &&
4406 adev->ip_blocks[i].version->funcs->post_soft_reset)
4407 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
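/*
 * A condensed restatement (for clarity, not additional driver logic) of how
 * the four helpers above compose into the soft reset path that
 * amdgpu_device_pre_asic_reset() uses further below.
 */
static int __maybe_unused example_soft_reset_sequence(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_device_ip_check_soft_reset(adev))
		return 0;	/* nothing is hung */

	if (amdgpu_device_ip_need_full_reset(adev))
		return -EAGAIN;	/* caller must fall back to a full ASIC reset */

	amdgpu_device_ip_pre_soft_reset(adev);
	r = amdgpu_device_ip_soft_reset(adev);
	amdgpu_device_ip_post_soft_reset(adev);

	/* a block that is still hung after soft reset forces a full reset */
	if (r || amdgpu_device_ip_check_soft_reset(adev))
		return -EAGAIN;

	return 0;
}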
4416 * amdgpu_device_recover_vram - Recover some VRAM contents
4418 * @adev: amdgpu_device pointer
4420 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
4421 * restore things like GPUVM page tables after a GPU reset where
4422 * the contents of VRAM might be lost.
4425 * 0 on success, negative error code on failure.
4427 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4429 struct dma_fence *fence = NULL, *next = NULL;
4430 struct amdgpu_bo *shadow;
4431 struct amdgpu_bo_vm *vmbo;
4434 if (amdgpu_sriov_runtime(adev))
4435 tmo = msecs_to_jiffies(8000);
4437 tmo = msecs_to_jiffies(100);
4439 dev_info(adev->dev, "recover vram bo from shadow start\n");
4440 mutex_lock(&adev->shadow_list_lock);
4441 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4443 /* No need to recover an evicted BO */
4444 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4445 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4446 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4449 r = amdgpu_bo_restore_shadow(shadow, &next);
4454 tmo = dma_fence_wait_timeout(fence, false, tmo);
4455 dma_fence_put(fence);
4460 } else if (tmo < 0) {
4468 mutex_unlock(&adev->shadow_list_lock);
4471 tmo = dma_fence_wait_timeout(fence, false, tmo);
4472 dma_fence_put(fence);
4474 if (r < 0 || tmo <= 0) {
4475 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4479 dev_info(adev->dev, "recover vram bo from shadow done\n");
4485 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4487 * @adev: amdgpu_device pointer
4488 * @from_hypervisor: request from hypervisor
4490 * Do a VF FLR and reinitialize the ASIC.
4491 * Returns 0 if it succeeded, otherwise a negative error code.
4493 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4494 bool from_hypervisor)
4497 struct amdgpu_hive_info *hive = NULL;
4498 int retry_limit = 0;
4501 amdgpu_amdkfd_pre_reset(adev);
4503 if (from_hypervisor)
4504 r = amdgpu_virt_request_full_gpu(adev, true);
4506 r = amdgpu_virt_reset_gpu(adev);
4510 /* Resume IP prior to SMC */
4511 r = amdgpu_device_ip_reinit_early_sriov(adev);
4515 amdgpu_virt_init_data_exchange(adev);
4517 r = amdgpu_device_fw_loading(adev);
4521 /* now we are okay to resume SMC/CP/SDMA */
4522 r = amdgpu_device_ip_reinit_late_sriov(adev);
4526 hive = amdgpu_get_xgmi_hive(adev);
4527 /* Update PSP FW topology after reset */
4528 if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4529 r = amdgpu_xgmi_update_topology(hive, adev);
4532 amdgpu_put_xgmi_hive(hive);
4535 amdgpu_irq_gpu_reset_resume_helper(adev);
4536 r = amdgpu_ib_ring_tests(adev);
4538 amdgpu_amdkfd_post_reset(adev);
4542 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4543 amdgpu_inc_vram_lost(adev);
4544 r = amdgpu_device_recover_vram(adev);
4546 amdgpu_virt_release_full_gpu(adev, true);
4548 if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4549 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4553 DRM_ERROR("GPU reset retry is beyond the retry limit\n");
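/*
 * Hedged restatement of the retry policy applied above:
 * AMDGPU_RETRY_SRIOV_RESET() treats -EBUSY, -ETIMEDOUT and -EINVAL as
 * transient failures, which are retried up to AMDGPU_MAX_RETRY_LIMIT times.
 */
static bool __maybe_unused example_should_retry_vf_reset(int r, int *retry_count)
{
	if (!AMDGPU_RETRY_SRIOV_RESET(r))
		return false;		/* hard failure, give up */
	if (*retry_count >= AMDGPU_MAX_RETRY_LIMIT)
		return false;		/* retry budget exhausted */
	(*retry_count)++;
	return true;
}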
4560 * amdgpu_device_has_job_running - check if there is any job in mirror list
4562 * @adev: amdgpu_device pointer
4564 * Check whether any job is still pending in the scheduler mirror list of any ring.
4566 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4569 struct drm_sched_job *job;
4571 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4572 struct amdgpu_ring *ring = adev->rings[i];
4574 if (!ring || !ring->sched.thread)
4577 spin_lock(&ring->sched.job_list_lock);
4578 job = list_first_entry_or_null(&ring->sched.pending_list,
4579 struct drm_sched_job, list);
4580 spin_unlock(&ring->sched.job_list_lock);
4588 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4590 * @adev: amdgpu_device pointer
4592 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover the GPU.
4595 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4598 if (amdgpu_gpu_recovery == 0)
4601 /* Skip soft reset check in fatal error mode */
4602 if (!amdgpu_ras_is_poison_mode_supported(adev))
4605 if (amdgpu_sriov_vf(adev))
4608 if (amdgpu_gpu_recovery == -1) {
4609 switch (adev->asic_type) {
4610 #ifdef CONFIG_DRM_AMDGPU_SI
4617 #ifdef CONFIG_DRM_AMDGPU_CIK
4624 case CHIP_CYAN_SKILLFISH:
4634 dev_info(adev->dev, "GPU recovery disabled.\n");
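/*
 * Simplified summary of the amdgpu_gpu_recovery module parameter handled
 * above, ignoring the RAS poison and SR-IOV special cases: 0 disables
 * recovery, -1 means "auto" (enabled except on the legacy SI/CIK parts
 * listed in the switch statement), and any other value enables it.
 */
static bool __maybe_unused example_recovery_enabled(int gpu_recovery,
						    bool legacy_asic)
{
	if (gpu_recovery == 0)
		return false;
	if (gpu_recovery == -1)
		return !legacy_asic;
	return true;
}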
4638 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4643 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4645 dev_info(adev->dev, "GPU mode1 reset\n");
4648 pci_clear_master(adev->pdev);
4650 amdgpu_device_cache_pci_state(adev->pdev);
4652 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4653 dev_info(adev->dev, "GPU smu mode1 reset\n");
4654 ret = amdgpu_dpm_mode1_reset(adev);
4656 dev_info(adev->dev, "GPU psp mode1 reset\n");
4657 ret = psp_gpu_reset(adev);
4661 dev_err(adev->dev, "GPU mode1 reset failed\n");
4663 amdgpu_device_load_pci_state(adev->pdev);
4665 /* wait for asic to come out of reset */
4666 for (i = 0; i < adev->usec_timeout; i++) {
4667 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4669 if (memsize != 0xffffffff)
4674 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
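/*
 * Sketch of the post-reset readiness poll used above: while the ASIC is
 * still in reset, reads of the memory size register return 0xffffffff,
 * so poll until a real value comes back. The helper name is illustrative.
 */
static int __maybe_unused example_wait_for_asic(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (adev->nbio.funcs->get_memsize(adev) != 0xffffffff)
			return 0;
		udelay(1);
	}

	return -ETIMEDOUT;
}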
4678 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4679 struct amdgpu_reset_context *reset_context)
4682 struct amdgpu_job *job = NULL;
4683 bool need_full_reset =
4684 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4686 if (reset_context->reset_req_dev == adev)
4687 job = reset_context->job;
4689 if (amdgpu_sriov_vf(adev)) {
4690 /* stop the data exchange thread */
4691 amdgpu_virt_fini_data_exchange(adev);
4694 amdgpu_fence_driver_isr_toggle(adev, true);
4696 /* block all schedulers and reset given job's ring */
4697 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4698 struct amdgpu_ring *ring = adev->rings[i];
4700 if (!ring || !ring->sched.thread)
4703 /* Clear the job fences from the fence driver so that force_completion
4704  * only signals the NULL and VM flush fences left in the fence driver. */
4705 amdgpu_fence_driver_clear_job_fences(ring);
4707 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4708 amdgpu_fence_driver_force_completion(ring);
4711 amdgpu_fence_driver_isr_toggle(adev, false);
4714 drm_sched_increase_karma(&job->base);
4716 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4717 /* If reset handler not implemented, continue; otherwise return */
4723 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4724 if (!amdgpu_sriov_vf(adev)) {
4726 if (!need_full_reset)
4727 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4729 if (!need_full_reset && amdgpu_gpu_recovery &&
4730 amdgpu_device_ip_check_soft_reset(adev)) {
4731 amdgpu_device_ip_pre_soft_reset(adev);
4732 r = amdgpu_device_ip_soft_reset(adev);
4733 amdgpu_device_ip_post_soft_reset(adev);
4734 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4735 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4736 need_full_reset = true;
4740 if (need_full_reset)
4741 r = amdgpu_device_ip_suspend(adev);
4742 if (need_full_reset)
4743 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4745 clear_bit(AMDGPU_NEED_FULL_RESET,
4746 &reset_context->flags);
4752 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4756 lockdep_assert_held(&adev->reset_domain->sem);
4758 for (i = 0; i < adev->num_regs; i++) {
4759 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4760 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4761 adev->reset_dump_reg_value[i]);
4767 #ifdef CONFIG_DEV_COREDUMP
4768 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4769 size_t count, void *data, size_t datalen)
4771 struct drm_printer p;
4772 struct amdgpu_device *adev = data;
4773 struct drm_print_iterator iter;
4778 iter.start = offset;
4779 iter.remain = count;
4781 p = drm_coredump_printer(&iter);
4783 drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4784 drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4785 drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4786 drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4787 if (adev->reset_task_info.pid)
4788 drm_printf(&p, "process_name: %s PID: %d\n",
4789 adev->reset_task_info.process_name,
4790 adev->reset_task_info.pid);
4792 if (adev->reset_vram_lost)
4793 drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4794 if (adev->num_regs) {
4795 drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n");
4797 for (i = 0; i < adev->num_regs; i++)
4798 drm_printf(&p, "0x%08x: 0x%08x\n",
4799 adev->reset_dump_reg_list[i],
4800 adev->reset_dump_reg_value[i]);
4803 return count - iter.remain;
4806 static void amdgpu_devcoredump_free(void *data)
4810 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4812 struct drm_device *dev = adev_to_drm(adev);
4814 ktime_get_ts64(&adev->reset_time);
4815 dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
4816 amdgpu_devcoredump_read, amdgpu_devcoredump_free);
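/*
 * Usage note (illustrative): once dev_coredumpm() has registered the dump,
 * it shows up in sysfs and can be read from user space, e.g.:
 *
 *   cat /sys/class/devcoredump/devcd<N>/data
 *
 * Writing to the data node releases the dump early; otherwise the
 * devcoredump core frees it automatically after a timeout.
 */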
4820 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4821 struct amdgpu_reset_context *reset_context)
4823 struct amdgpu_device *tmp_adev = NULL;
4824 bool need_full_reset, skip_hw_reset, vram_lost = false;
4826 bool gpu_reset_for_dev_remove = 0;
4828 /* Try reset handler method first */
4829 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4831 amdgpu_reset_reg_dumps(tmp_adev);
4833 reset_context->reset_device_list = device_list_handle;
4834 r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4835 /* If reset handler not implemented, continue; otherwise return */
4841 /* Reset handler not implemented, use the default method */
4843 need_full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4844 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4846 gpu_reset_for_dev_remove =
4847 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
4848 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4851 * ASIC reset has to be done on all XGMI hive nodes ASAP
4852 * to allow proper link negotiation in the FW (within 1 sec)
4854 if (!skip_hw_reset && need_full_reset) {
4855 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4856 /* For XGMI run all resets in parallel to speed up the process */
4857 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4858 tmp_adev->gmc.xgmi.pending_reset = false;
4859 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4862 r = amdgpu_asic_reset(tmp_adev);
4865 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4866 r, adev_to_drm(tmp_adev)->unique);
4871 /* For XGMI wait for all resets to complete before proceed */
4873 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4874 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4875 flush_work(&tmp_adev->xgmi_reset_work);
4876 r = tmp_adev->asic_reset_res;
4884 if (!r && amdgpu_ras_intr_triggered()) {
4885 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4886 if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4887 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4888 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4891 amdgpu_ras_intr_cleared();
4894 /* Since the mode1 reset affects base ip blocks, the
4895 * phase1 ip blocks need to be resumed. Otherwise there
4896 * will be a BIOS signature error and the psp bootloader
4897 * can't load kdb on the next amdgpu install.
4899 if (gpu_reset_for_dev_remove) {
4900 list_for_each_entry(tmp_adev, device_list_handle, reset_list)
4901 amdgpu_device_ip_resume_phase1(tmp_adev);
4906 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4907 if (need_full_reset) {
4909 r = amdgpu_device_asic_init(tmp_adev);
4911 dev_warn(tmp_adev->dev, "asic atom init failed!");
4913 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4914 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4918 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4922 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4923 #ifdef CONFIG_DEV_COREDUMP
4924 tmp_adev->reset_vram_lost = vram_lost;
4925 memset(&tmp_adev->reset_task_info, 0,
4926 sizeof(tmp_adev->reset_task_info));
4927 if (reset_context->job && reset_context->job->vm)
4928 tmp_adev->reset_task_info =
4929 reset_context->job->vm->task_info;
4930 amdgpu_reset_capture_coredumpm(tmp_adev);
4933 DRM_INFO("VRAM is lost due to GPU reset!\n");
4934 amdgpu_inc_vram_lost(tmp_adev);
4937 r = amdgpu_device_fw_loading(tmp_adev);
4941 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4946 amdgpu_device_fill_reset_magic(tmp_adev);
4949 * Add this ASIC back as tracked now that the reset has
4950 * completed successfully.
4952 amdgpu_register_gpu_instance(tmp_adev);
4954 if (!reset_context->hive &&
4955 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4956 amdgpu_xgmi_add_device(tmp_adev);
4958 r = amdgpu_device_ip_late_init(tmp_adev);
4962 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4965 * The GPU enters a bad state once the number of
4966 * faulty pages reported by ECC reaches the threshold,
4967 * and RAS recovery is scheduled next. So add one check
4968 * here to abort recovery if the bad page threshold
4969 * has indeed been exceeded, and remind the user to
4970 * retire this GPU or set a bigger
4971 * bad_page_threshold value to fix this the next
4972 * time the driver is probed.
4974 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4976 amdgpu_ras_resume(tmp_adev);
4982 /* Update PSP FW topology after reset */
4983 if (reset_context->hive &&
4984 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4985 r = amdgpu_xgmi_update_topology(
4986 reset_context->hive, tmp_adev);
4992 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4993 r = amdgpu_ib_ring_tests(tmp_adev);
4995 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4996 need_full_reset = true;
5003 r = amdgpu_device_recover_vram(tmp_adev);
5005 tmp_adev->asic_reset_res = r;
5009 if (need_full_reset)
5010 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5012 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5016 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5019 switch (amdgpu_asic_reset_method(adev)) {
5020 case AMD_RESET_METHOD_MODE1:
5021 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5023 case AMD_RESET_METHOD_MODE2:
5024 adev->mp1_state = PP_MP1_STATE_RESET;
5027 adev->mp1_state = PP_MP1_STATE_NONE;
5032 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5034 amdgpu_vf_error_trans_all(adev);
5035 adev->mp1_state = PP_MP1_STATE_NONE;
5038 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5040 struct pci_dev *p = NULL;
5042 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5043 adev->pdev->bus->number, 1);
5045 pm_runtime_enable(&(p->dev));
5046 pm_runtime_resume(&(p->dev));
5052 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5054 enum amd_reset_method reset_method;
5055 struct pci_dev *p = NULL;
5059 * For now, only BACO and mode1 reset are confirmed
5060 * to suffer from the audio issue if the audio device is not properly suspended.
5062 reset_method = amdgpu_asic_reset_method(adev);
5063 if ((reset_method != AMD_RESET_METHOD_BACO) &&
5064 (reset_method != AMD_RESET_METHOD_MODE1))
5067 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5068 adev->pdev->bus->number, 1);
5072 expires = pm_runtime_autosuspend_expiration(&(p->dev));
5075 * If we cannot get the audio device autosuspend delay,
5076 * a fixed 4s interval is used. Since 3s is
5077 * the audio controller's default autosuspend delay setting,
5078 * the 4s used here is guaranteed to cover it.
5080 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5082 while (!pm_runtime_status_suspended(&(p->dev))) {
5083 if (!pm_runtime_suspend(&(p->dev)))
5086 if (expires < ktime_get_mono_fast_ns()) {
5087 dev_warn(adev->dev, "failed to suspend display audio\n");
5089 /* TODO: abort the succeeding gpu reset? */
5094 pm_runtime_disable(&(p->dev));
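/*
 * Both audio helpers above rely on the same PCI topology assumption,
 * restated here as a sketch: the GPU's HDA controller lives at devfn 1
 * on the GPU's own bus (the GPU itself being function 0). The helper name
 * is illustrative; the caller owns the reference and must pci_dev_put() it.
 */
static struct pci_dev *__maybe_unused example_find_gpu_audio(struct amdgpu_device *adev)
{
	return pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					   adev->pdev->bus->number, 1);
}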
5100 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5102 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5104 #if defined(CONFIG_DEBUG_FS)
5105 if (!amdgpu_sriov_vf(adev))
5106 cancel_work(&adev->reset_work);
5110 cancel_work(&adev->kfd.reset_work);
5112 if (amdgpu_sriov_vf(adev))
5113 cancel_work(&adev->virt.flr_work);
5115 if (con && adev->ras_enabled)
5116 cancel_work(&con->recovery_work);
5121 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5123 * @adev: amdgpu_device pointer
5124 * @job: the job which triggered the hang, if any
5126 * Attempt to reset the GPU if it has hung (all ASICs).
5127 * Attempt to do a soft reset or full reset and reinitialize the ASIC.
5128 * Returns 0 for success or an error on failure.
5131 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5132 struct amdgpu_job *job,
5133 struct amdgpu_reset_context *reset_context)
5135 struct list_head device_list, *device_list_handle = NULL;
5136 bool job_signaled = false;
5137 struct amdgpu_hive_info *hive = NULL;
5138 struct amdgpu_device *tmp_adev = NULL;
5140 bool need_emergency_restart = false;
5141 bool audio_suspended = false;
5142 bool gpu_reset_for_dev_remove = false;
5144 gpu_reset_for_dev_remove =
5145 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5146 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5149 * Special case: RAS triggered and full reset isn't supported
5151 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5154 * Flush RAM to disk so that after reboot
5155 * the user can read the log and see why the system rebooted.
5157 if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5158 DRM_WARN("Emergency reboot.");
5161 emergency_restart();
5164 dev_info(adev->dev, "GPU %s begin!\n",
5165 need_emergency_restart ? "jobs stop":"reset");
5167 if (!amdgpu_sriov_vf(adev))
5168 hive = amdgpu_get_xgmi_hive(adev);
5170 mutex_lock(&hive->hive_lock);
5172 reset_context->job = job;
5173 reset_context->hive = hive;
5175 * Build list of devices to reset.
5176 * In case we are in XGMI hive mode, re-sort the device list
5177 * to put adev in the 1st position.
5179 INIT_LIST_HEAD(&device_list);
5180 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5181 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5182 list_add_tail(&tmp_adev->reset_list, &device_list);
5183 if (gpu_reset_for_dev_remove && adev->shutdown)
5184 tmp_adev->shutdown = true;
5186 if (!list_is_first(&adev->reset_list, &device_list))
5187 list_rotate_to_front(&adev->reset_list, &device_list);
5188 device_list_handle = &device_list;
5190 list_add_tail(&adev->reset_list, &device_list);
5191 device_list_handle = &device_list;
5194 /* We need to lock reset domain only once both for XGMI and single device */
5195 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5197 amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5199 /* block all schedulers and reset given job's ring */
5200 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5202 amdgpu_device_set_mp1_state(tmp_adev);
5205 * Try to put the audio codec into suspend state
5206 * before the gpu reset starts.
5208 * The power domain of the graphics device is
5209 * shared with the AZ power domain. Without this,
5210 * we may change the audio hardware from behind
5211 * the audio driver's back, which will trigger
5212 * some audio codec errors.
5214 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5215 audio_suspended = true;
5217 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5219 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5221 if (!amdgpu_sriov_vf(tmp_adev))
5222 amdgpu_amdkfd_pre_reset(tmp_adev);
5225 * Mark these ASICs to be reset as untracked first,
5226 * and add them back after the reset completes.
5228 amdgpu_unregister_gpu_instance(tmp_adev);
5230 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5232 /* disable ras on ALL IPs */
5233 if (!need_emergency_restart &&
5234 amdgpu_device_ip_need_full_reset(tmp_adev))
5235 amdgpu_ras_suspend(tmp_adev);
5237 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5238 struct amdgpu_ring *ring = tmp_adev->rings[i];
5240 if (!ring || !ring->sched.thread)
5243 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5245 if (need_emergency_restart)
5246 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5248 atomic_inc(&tmp_adev->gpu_reset_counter);
5251 if (need_emergency_restart)
5252 goto skip_sched_resume;
5255 * Must check guilty signal here since after this point all old
5256 * HW fences are force signaled.
5258 * job->base holds a reference to parent fence
5260 if (job && dma_fence_is_signaled(&job->hw_fence)) {
5261 job_signaled = true;
5262 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5266 retry: /* Rest of adevs pre asic reset from XGMI hive. */
5267 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5268 if (gpu_reset_for_dev_remove) {
5269 /* Workaround for ASICs that need to disable SMC first */
5270 amdgpu_device_smu_fini_early(tmp_adev);
5272 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5273 /* TODO: should we stop? */
5275 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5276 r, adev_to_drm(tmp_adev)->unique);
5277 tmp_adev->asic_reset_res = r;
5281 * Drop all pending non scheduler resets. Scheduler resets
5282 * were already dropped during drm_sched_stop
5284 amdgpu_device_stop_pending_resets(tmp_adev);
5287 /* Actual ASIC resets if needed. */
5288 /* Host driver will handle XGMI hive reset for SRIOV */
5289 if (amdgpu_sriov_vf(adev)) {
5290 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5292 adev->asic_reset_res = r;
5294 /* Aldebaran supports ras in SRIOV, so need resume ras during reset */
5295 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
5296 amdgpu_ras_resume(adev);
5298 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5299 if (r && r == -EAGAIN)
5302 if (!r && gpu_reset_for_dev_remove)
5308 /* Post ASIC reset for all devs. */
5309 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5311 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5312 struct amdgpu_ring *ring = tmp_adev->rings[i];
5314 if (!ring || !ring->sched.thread)
5317 drm_sched_start(&ring->sched, true);
5320 if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
5321 amdgpu_mes_self_test(tmp_adev);
5323 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5324 drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5327 if (tmp_adev->asic_reset_res)
5328 r = tmp_adev->asic_reset_res;
5330 tmp_adev->asic_reset_res = 0;
5333 /* Bad news: how do we tell userspace? */
5334 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5335 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5337 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5338 if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5339 DRM_WARN("smart shift update failed\n");
5344 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5345 /* unlock kfd: SRIOV would do it separately */
5346 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5347 amdgpu_amdkfd_post_reset(tmp_adev);
5349 /* kfd_post_reset will do nothing if the kfd device is not initialized,
5350 * so bring up kfd here if it was not initialized before
5352 if (!adev->kfd.init_complete)
5353 amdgpu_amdkfd_device_init(adev);
5355 if (audio_suspended)
5356 amdgpu_device_resume_display_audio(tmp_adev);
5358 amdgpu_device_unset_mp1_state(tmp_adev);
5360 amdgpu_ras_set_error_query_ready(tmp_adev, true);
5364 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5366 amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5369 mutex_unlock(&hive->hive_lock);
5370 amdgpu_put_xgmi_hive(hive);
5374 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5376 atomic_set(&adev->reset_domain->reset_res, r);
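/*
 * Illustrative caller sketch, loosely modeled on the job timeout path:
 * fill in a reset context and hand the hung job to the recovery routine.
 * The helper itself is hypothetical; the field usage mirrors the PCI slot
 * reset handler further below.
 */
static void __maybe_unused example_trigger_recovery(struct amdgpu_device *adev,
						    struct amdgpu_job *job)
{
	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));

	reset_context.method = AMD_RESET_METHOD_NONE;	/* let the driver pick */
	reset_context.reset_req_dev = adev;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	amdgpu_device_gpu_recover(adev, job, &reset_context);
}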
5381 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5383 * @adev: amdgpu_device pointer
5385 * Fetches and stores in the driver the PCIE capabilities (gen speed
5386 * and lanes) of the slot the device is in. Handles APUs and
5387 * virtualized environments where PCIE config space may not be available.
5389 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5391 struct pci_dev *pdev;
5392 enum pci_bus_speed speed_cap, platform_speed_cap;
5393 enum pcie_link_width platform_link_width;
5395 if (amdgpu_pcie_gen_cap)
5396 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5398 if (amdgpu_pcie_lane_cap)
5399 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5401 /* covers APUs as well */
5402 if (pci_is_root_bus(adev->pdev->bus)) {
5403 if (adev->pm.pcie_gen_mask == 0)
5404 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5405 if (adev->pm.pcie_mlw_mask == 0)
5406 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5410 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5413 pcie_bandwidth_available(adev->pdev, NULL,
5414 &platform_speed_cap, &platform_link_width);
5416 if (adev->pm.pcie_gen_mask == 0) {
5419 speed_cap = pcie_get_speed_cap(pdev);
5420 if (speed_cap == PCI_SPEED_UNKNOWN) {
5421 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5422 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5423 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5425 if (speed_cap == PCIE_SPEED_32_0GT)
5426 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5427 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5428 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5429 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5430 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5431 else if (speed_cap == PCIE_SPEED_16_0GT)
5432 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5433 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5434 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5435 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5436 else if (speed_cap == PCIE_SPEED_8_0GT)
5437 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5438 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5439 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5440 else if (speed_cap == PCIE_SPEED_5_0GT)
5441 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5442 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5444 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5447 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5448 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5449 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5451 if (platform_speed_cap == PCIE_SPEED_32_0GT)
5452 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5453 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5454 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5455 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5456 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5457 else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5458 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5459 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5460 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5461 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5462 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5463 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5464 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5465 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5466 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5467 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5468 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5470 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5474 if (adev->pm.pcie_mlw_mask == 0) {
5475 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5476 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5478 switch (platform_link_width) {
5480 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5481 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5482 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5483 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5484 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5485 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5486 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5489 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5490 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5491 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5492 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5493 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5494 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5497 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5498 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5499 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5500 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5501 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5504 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5505 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5506 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5507 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5510 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5511 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5512 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5515 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5516 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5519 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5529 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5531 * @adev: amdgpu_device pointer
5532 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5534 * Return true if @peer_adev can access (DMA) @adev through the PCIe
5535 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of the peer device.
5538 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5539 struct amdgpu_device *peer_adev)
5541 #ifdef CONFIG_HSA_AMD_P2P
5542 uint64_t address_mask = peer_adev->dev->dma_mask ?
5543 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5544 resource_size_t aper_limit =
5545 adev->gmc.aper_base + adev->gmc.aper_size - 1;
5547 bool p2p_access = !adev->gmc.xgmi.connected_to_cpu &&
5548 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5550 return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5551 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5552 !(adev->gmc.aper_base & address_mask ||
5553 aper_limit & address_mask));
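/*
 * Condensed restatement of the check above (illustrative only): peer DMA
 * through the BAR is possible when the whole VRAM is CPU-visible
 * ("large BAR") and the aperture falls within the peer's DMA mask.
 */
static bool __maybe_unused example_is_large_bar(struct amdgpu_device *adev)
{
	return adev->gmc.visible_vram_size &&
	       adev->gmc.real_vram_size == adev->gmc.visible_vram_size;
}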
5559 int amdgpu_device_baco_enter(struct drm_device *dev)
5561 struct amdgpu_device *adev = drm_to_adev(dev);
5562 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5564 if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5567 if (ras && adev->ras_enabled &&
5568 adev->nbio.funcs->enable_doorbell_interrupt)
5569 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5571 return amdgpu_dpm_baco_enter(adev);
5574 int amdgpu_device_baco_exit(struct drm_device *dev)
5576 struct amdgpu_device *adev = drm_to_adev(dev);
5577 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5580 if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5583 ret = amdgpu_dpm_baco_exit(adev);
5587 if (ras && adev->ras_enabled &&
5588 adev->nbio.funcs->enable_doorbell_interrupt)
5589 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5591 if (amdgpu_passthrough(adev) &&
5592 adev->nbio.funcs->clear_doorbell_interrupt)
5593 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5599 * amdgpu_pci_error_detected - Called when a PCI error is detected.
5600 * @pdev: PCI device struct
5601 * @state: PCI channel state
5603 * Description: Called when a PCI error is detected.
5605 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5607 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5609 struct drm_device *dev = pci_get_drvdata(pdev);
5610 struct amdgpu_device *adev = drm_to_adev(dev);
5613 DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5615 if (adev->gmc.xgmi.num_physical_nodes > 1) {
5616 DRM_WARN("No support for XGMI hive yet...");
5617 return PCI_ERS_RESULT_DISCONNECT;
5620 adev->pci_channel_state = state;
5623 case pci_channel_io_normal:
5624 return PCI_ERS_RESULT_CAN_RECOVER;
5625 /* Fatal error, prepare for slot reset */
5626 case pci_channel_io_frozen:
5628 * Locking adev->reset_domain->sem will prevent any external access
5629 * to GPU during PCI error recovery
5631 amdgpu_device_lock_reset_domain(adev->reset_domain);
5632 amdgpu_device_set_mp1_state(adev);
5635 * Block any work scheduling as we do for regular GPU reset
5636 * for the duration of the recovery
5638 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5639 struct amdgpu_ring *ring = adev->rings[i];
5641 if (!ring || !ring->sched.thread)
5644 drm_sched_stop(&ring->sched, NULL);
5646 atomic_inc(&adev->gpu_reset_counter);
5647 return PCI_ERS_RESULT_NEED_RESET;
5648 case pci_channel_io_perm_failure:
5649 /* Permanent error, prepare for device removal */
5650 return PCI_ERS_RESULT_DISCONNECT;
5653 return PCI_ERS_RESULT_NEED_RESET;
5657 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5658 * @pdev: pointer to PCI device
5660 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5663 DRM_INFO("PCI error: mmio enabled callback!!\n");
5665 /* TODO - dump whatever for debugging purposes */
5667 /* This is called only if amdgpu_pci_error_detected returns
5668 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5669 * works, so there is no need to reset the slot.
5672 return PCI_ERS_RESULT_RECOVERED;
5676 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5677 * @pdev: PCI device struct
5679 * Description: This routine is called by the pci error recovery
5680 * code after the PCI slot has been reset, just before we
5681 * should resume normal operations.
5683 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5685 struct drm_device *dev = pci_get_drvdata(pdev);
5686 struct amdgpu_device *adev = drm_to_adev(dev);
5688 struct amdgpu_reset_context reset_context;
5690 struct list_head device_list;
5692 DRM_INFO("PCI error: slot reset callback!!\n");
5694 memset(&reset_context, 0, sizeof(reset_context));
5696 INIT_LIST_HEAD(&device_list);
5697 list_add_tail(&adev->reset_list, &device_list);
5699 /* wait for asic to come out of reset */
5702 /* Restore PCI config space */
5703 amdgpu_device_load_pci_state(pdev);
5705 /* confirm ASIC came out of reset */
5706 for (i = 0; i < adev->usec_timeout; i++) {
5707 memsize = amdgpu_asic_get_config_memsize(adev);
5709 if (memsize != 0xffffffff)
5713 if (memsize == 0xffffffff) {
5718 reset_context.method = AMD_RESET_METHOD_NONE;
5719 reset_context.reset_req_dev = adev;
5720 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5721 set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5723 adev->no_hw_access = true;
5724 r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5725 adev->no_hw_access = false;
5729 r = amdgpu_do_asic_reset(&device_list, &reset_context);
5733 if (amdgpu_device_cache_pci_state(adev->pdev))
5734 pci_restore_state(adev->pdev);
5736 DRM_INFO("PCIe error recovery succeeded\n");
5738 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5739 amdgpu_device_unset_mp1_state(adev);
5740 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5743 return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5747 * amdgpu_pci_resume() - resume normal ops after PCI reset
5748 * @pdev: pointer to PCI device
5750 * Called when the error recovery driver tells us that it's
5751 * OK to resume normal operation.
5753 void amdgpu_pci_resume(struct pci_dev *pdev)
5755 struct drm_device *dev = pci_get_drvdata(pdev);
5756 struct amdgpu_device *adev = drm_to_adev(dev);
5760 DRM_INFO("PCI error: resume callback!!\n");
5762 /* Only continue execution for the case of pci_channel_io_frozen */
5763 if (adev->pci_channel_state != pci_channel_io_frozen)
5766 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5767 struct amdgpu_ring *ring = adev->rings[i];
5769 if (!ring || !ring->sched.thread)
5772 drm_sched_start(&ring->sched, true);
5775 amdgpu_device_unset_mp1_state(adev);
5776 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5779 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5781 struct drm_device *dev = pci_get_drvdata(pdev);
5782 struct amdgpu_device *adev = drm_to_adev(dev);
5785 r = pci_save_state(pdev);
5787 kfree(adev->pci_state);
5789 adev->pci_state = pci_store_saved_state(pdev);
5791 if (!adev->pci_state) {
5792 DRM_ERROR("Failed to store PCI saved state");
5796 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5803 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5805 struct drm_device *dev = pci_get_drvdata(pdev);
5806 struct amdgpu_device *adev = drm_to_adev(dev);
5809 if (!adev->pci_state)
5812 r = pci_load_saved_state(pdev, adev->pci_state);
5815 pci_restore_state(pdev);
5817 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5824 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5825 struct amdgpu_ring *ring)
5827 #ifdef CONFIG_X86_64
5828 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5831 if (adev->gmc.xgmi.connected_to_cpu)
5834 if (ring && ring->funcs->emit_hdp_flush)
5835 amdgpu_ring_emit_hdp_flush(ring);
5837 amdgpu_asic_flush_hdp(adev, ring);
5840 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5841 struct amdgpu_ring *ring)
5843 #ifdef CONFIG_X86_64
5844 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5847 if (adev->gmc.xgmi.connected_to_cpu)
5850 amdgpu_asic_invalidate_hdp(adev, ring);
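/*
 * Hedged usage sketch for the two HDP helpers above: flush the HDP write
 * cache after the CPU writes VRAM through the BAR so the GPU observes the
 * data, and invalidate the HDP read cache before the CPU re-reads data the
 * GPU produced. The helper below is illustrative, not driver API.
 */
static void __maybe_unused example_cpu_write_for_gpu(struct amdgpu_device *adev,
						     u32 __iomem *ptr, u32 val)
{
	writel(val, ptr);			/* CPU write through the BAR */
	amdgpu_device_flush_hdp(adev, NULL);	/* make it visible to the GPU */
}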
5853 int amdgpu_in_reset(struct amdgpu_device *adev)
5855 return atomic_read(&adev->reset_domain->in_gpu_reset);
5859 * amdgpu_device_halt() - bring hardware to some kind of halt state
5861 * @adev: amdgpu_device pointer
5863 * Bring hardware to some kind of halt state so that no one can touch it
5864 * any more. It helps to maintain the error context when an error occurs.
5865 * Compared to a simple hang, the system will stay stable at least for SSH
5866 * access. Then it should be trivial to inspect the hardware state and
5867 * see what's going on. Implemented as follows:
5869 * 1. drm_dev_unplug() makes device inaccessible to user space(IOCTLs, etc),
5870 * clears all CPU mappings to device, disallows remappings through page faults
5871 * 2. amdgpu_irq_disable_all() disables all interrupts
5872 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5873 * 4. set adev->no_hw_access to avoid potential crashes after step 5
5874 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5875 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5876 * flush any in flight DMA operations
5878 void amdgpu_device_halt(struct amdgpu_device *adev)
5880 struct pci_dev *pdev = adev->pdev;
5881 struct drm_device *ddev = adev_to_drm(adev);
5883 drm_dev_unplug(ddev);
5885 amdgpu_irq_disable_all(adev);
5887 amdgpu_fence_driver_hw_fini(adev);
5889 adev->no_hw_access = true;
5891 amdgpu_device_unmap_mmio(adev);
5893 pci_disable_device(pdev);
5894 pci_wait_for_pending_transaction(pdev);
5897 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5900 unsigned long flags, address, data;
5903 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5904 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5906 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5907 WREG32(address, reg * 4);
5908 (void)RREG32(address);
5910 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5914 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
5917 unsigned long flags, address, data;
5919 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5920 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5922 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5923 WREG32(address, reg * 4);
5924 (void)RREG32(address);
5927 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
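/*
 * The two port accessors above use a classic index/data register pair:
 * write the register offset to the index register, do a posting read to
 * latch it, then access the data register. Restated as a sketch without
 * the pcie_idx_lock serialization the real helpers provide.
 */
static u32 __maybe_unused example_indexed_read(struct amdgpu_device *adev,
					       unsigned long address,
					       unsigned long data, u32 reg)
{
	WREG32(address, reg * 4);	/* select the register (byte offset) */
	(void)RREG32(address);		/* posting read to latch the index */
	return RREG32(data);
}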
5931 * amdgpu_device_switch_gang - switch to a new gang
5932 * @adev: amdgpu_device pointer
5933 * @gang: the gang to switch to
5935 * Try to switch to a new gang.
5936 * Returns: NULL if we switched to the new gang or a reference to the current gang leader.
5939 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
5940 struct dma_fence *gang)
5942 struct dma_fence *old = NULL;
5947 old = dma_fence_get_rcu_safe(&adev->gang_submit);
5953 if (!dma_fence_is_signaled(old))
5956 } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
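/*
 * Hedged usage sketch: a submitter switching to a new gang must first
 * synchronize to the fence returned above, if any, since a non-NULL return
 * means the previous gang has not finished yet. The helper name and the
 * plain wait are illustrative; the real submission path may instead track
 * the old fence as a scheduler dependency.
 */
static int __maybe_unused example_begin_gang(struct amdgpu_device *adev,
					     struct dma_fence *gang)
{
	struct dma_fence *old = amdgpu_device_switch_gang(adev, gang);
	long r = 0;

	if (old) {
		r = dma_fence_wait(old, true);	/* interruptible wait */
		dma_fence_put(old);
	}

	return r;
}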
5963 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
5965 switch (adev->asic_type) {
5966 #ifdef CONFIG_DRM_AMDGPU_SI
5970 /* chips with no display hardware */
5972 #ifdef CONFIG_DRM_AMDGPU_SI
5978 #ifdef CONFIG_DRM_AMDGPU_CIK
5987 case CHIP_POLARIS10:
5988 case CHIP_POLARIS11:
5989 case CHIP_POLARIS12:
5993 /* chips with display hardware */
5997 if (!adev->ip_versions[DCE_HWIP][0] ||
5998 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))