]> Git Repo - linux.git/blobdiff - drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drm/nouveau/kms: Don't change EDID when it hasn't actually changed
[linux.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_device.c
index a027a8f7b28193c443788a1fec0f99acae702b69..4204cda680f5668d6d0c284e285e5852c3b17c22 100644 (file)
@@ -80,6 +80,8 @@ MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/sienna_cichlid_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/navy_flounder_gpu_info.bin");
 
 #define AMDGPU_RESUME_MS               2000
 
@@ -112,6 +114,8 @@ const char *amdgpu_asic_name[] = {
        "NAVI10",
        "NAVI14",
        "NAVI12",
+       "SIENNA_CICHLID",
+       "NAVY_FLOUNDER",
        "LAST",
 };
 
@@ -299,10 +303,10 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 }
 
 /*
- * device register access helper functions.
+ * MMIO register access helper functions.
  */
 /**
- * amdgpu_device_rreg - read a register
+ * amdgpu_mm_rreg - read a memory mapped IO register
  *
  * @adev: amdgpu_device pointer
  * @reg: dword aligned register offset
@@ -310,8 +314,8 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
  *
  * Returns the 32 bit value from the offset specified.
  */
-uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg,
-                           uint32_t acc_flags)
+uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
+                       uint32_t acc_flags)
 {
        uint32_t ret;
 
@@ -320,9 +324,15 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg,
 
        if ((reg * 4) < adev->rmmio_size)
                ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
-       else
-               ret = adev->pcie_rreg(adev, (reg * 4));
-       trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
+       else {
+               unsigned long flags;
+
+               spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+               writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
+               ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
+               spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+       }
+       trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
        return ret;
 }
 
@@ -368,19 +378,24 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
                BUG();
 }
 
-void static inline amdgpu_device_wreg_no_kiq(struct amdgpu_device *adev, uint32_t reg,
-                                            uint32_t v, uint32_t acc_flags)
+void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags)
 {
-       trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
+       trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
 
        if ((reg * 4) < adev->rmmio_size)
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
-       else
-               adev->pcie_wreg(adev, (reg * 4), v);
+       else {
+               unsigned long flags;
+
+               spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+               writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
+               writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
+               spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+       }
 }
 
 /**
- * amdgpu_device_wreg - write to a register
+ * amdgpu_mm_wreg - write to a memory mapped IO register
  *
  * @adev: amdgpu_device pointer
  * @reg: dword aligned register offset
@@ -389,13 +404,13 @@ void static inline amdgpu_device_wreg_no_kiq(struct amdgpu_device *adev, uint32_
  *
  * Writes the value specified to the offset specified.
  */
-void amdgpu_device_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
-                       uint32_t acc_flags)
+void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+                   uint32_t acc_flags)
 {
        if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
                return amdgpu_kiq_wreg(adev, reg, v);
 
-       amdgpu_device_wreg_no_kiq(adev, reg, v, acc_flags);
+       amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
 }
 
 /*
@@ -414,7 +429,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t
                        return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
        }
 
-       amdgpu_device_wreg_no_kiq(adev, reg, v, acc_flags);
+       amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
 }
 
 /**
@@ -907,6 +922,11 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
        if (amdgpu_sriov_vf(adev))
                return 0;
 
+       /* skip if the bios has already enabled large BAR */
+       if (adev->gmc.real_vram_size &&
+           (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
+               return 0;
+
        /* Check if the root BUS has 64bit memory resources */
        root = adev->pdev->bus;
        while (root->parent)
@@ -1159,6 +1179,16 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
                amdgpu_vm_fragment_size = -1;
        }
 
+       if (amdgpu_sched_hw_submission < 2) {
+               dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
+                        amdgpu_sched_hw_submission);
+               amdgpu_sched_hw_submission = 2;
+       } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
+               dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
+                        amdgpu_sched_hw_submission);
+               amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
+       }
+
        amdgpu_device_check_smu_prv_buffer_size(adev);
 
        amdgpu_device_check_vm_size(adev);
@@ -1527,22 +1557,25 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 {
        const char *chip_name;
-       char fw_name[30];
+       char fw_name[40];
        int err;
        const struct gpu_info_firmware_header_v1_0 *hdr;
 
        adev->firmware.gpu_info_fw = NULL;
 
+       if (adev->discovery_bin) {
+               amdgpu_discovery_get_gfx_info(adev);
+
+               /*
+                * FIXME: The bounding box is still needed by Navi12, so
+                * temporarily read it from gpu_info firmware. Should be dropped
+                * when DAL no longer needs it.
+                */
+               if (adev->asic_type != CHIP_NAVI12)
+                       return 0;
+       }
+
        switch (adev->asic_type) {
-       case CHIP_TOPAZ:
-       case CHIP_TONGA:
-       case CHIP_FIJI:
-       case CHIP_POLARIS10:
-       case CHIP_POLARIS11:
-       case CHIP_POLARIS12:
-       case CHIP_VEGAM:
-       case CHIP_CARRIZO:
-       case CHIP_STONEY:
 #ifdef CONFIG_DRM_AMDGPU_SI
        case CHIP_VERDE:
        case CHIP_TAHITI:
@@ -1557,6 +1590,15 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
        case CHIP_KABINI:
        case CHIP_MULLINS:
 #endif
+       case CHIP_TOPAZ:
+       case CHIP_TONGA:
+       case CHIP_FIJI:
+       case CHIP_POLARIS10:
+       case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
+       case CHIP_VEGAM:
+       case CHIP_CARRIZO:
+       case CHIP_STONEY:
        case CHIP_VEGA20:
        default:
                return 0;
@@ -1589,6 +1631,12 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
        case CHIP_NAVI12:
                chip_name = "navi12";
                break;
+       case CHIP_SIENNA_CICHLID:
+               chip_name = "sienna_cichlid";
+               break;
+       case CHIP_NAVY_FLOUNDER:
+               chip_name = "navy_flounder";
+               break;
        }
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
@@ -1617,10 +1665,11 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
                        (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
                                                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 
-               if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
-                       amdgpu_discovery_get_gfx_info(adev);
+               /*
+                * Should be dropped when DAL no longer needs it.
+                */
+               if (adev->asic_type == CHIP_NAVI12)
                        goto parse_soc_bounding_box;
-               }
 
                adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
                adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
@@ -1653,7 +1702,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 parse_soc_bounding_box:
                /*
                 * soc bounding box info is not integrated in discovery table,
-                * we always need to parse it from gpu info firmware.
+                * we always need to parse it from gpu info firmware if needed.
                 */
                if (hdr->version_minor == 2) {
                        const struct gpu_info_firmware_v1_2 *gpu_info_fw =
@@ -1689,25 +1738,13 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 
        amdgpu_device_enable_virtual_display(adev);
 
-       switch (adev->asic_type) {
-       case CHIP_TOPAZ:
-       case CHIP_TONGA:
-       case CHIP_FIJI:
-       case CHIP_POLARIS10:
-       case CHIP_POLARIS11:
-       case CHIP_POLARIS12:
-       case CHIP_VEGAM:
-       case CHIP_CARRIZO:
-       case CHIP_STONEY:
-               if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
-                       adev->family = AMDGPU_FAMILY_CZ;
-               else
-                       adev->family = AMDGPU_FAMILY_VI;
-
-               r = vi_set_ip_blocks(adev);
+       if (amdgpu_sriov_vf(adev)) {
+               r = amdgpu_virt_request_full_gpu(adev, true);
                if (r)
                        return r;
-               break;
+       }
+
+       switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_SI
        case CHIP_VERDE:
        case CHIP_TAHITI:
@@ -1726,24 +1763,41 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
-               if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
-                       adev->family = AMDGPU_FAMILY_CI;
-               else
+               if (adev->flags & AMD_IS_APU)
                        adev->family = AMDGPU_FAMILY_KV;
+               else
+                       adev->family = AMDGPU_FAMILY_CI;
 
                r = cik_set_ip_blocks(adev);
                if (r)
                        return r;
                break;
 #endif
+       case CHIP_TOPAZ:
+       case CHIP_TONGA:
+       case CHIP_FIJI:
+       case CHIP_POLARIS10:
+       case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
+       case CHIP_VEGAM:
+       case CHIP_CARRIZO:
+       case CHIP_STONEY:
+               if (adev->flags & AMD_IS_APU)
+                       adev->family = AMDGPU_FAMILY_CZ;
+               else
+                       adev->family = AMDGPU_FAMILY_VI;
+
+               r = vi_set_ip_blocks(adev);
+               if (r)
+                       return r;
+               break;
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_RAVEN:
        case CHIP_ARCTURUS:
        case CHIP_RENOIR:
-               if (adev->asic_type == CHIP_RAVEN ||
-                   adev->asic_type == CHIP_RENOIR)
+               if (adev->flags & AMD_IS_APU)
                        adev->family = AMDGPU_FAMILY_RV;
                else
                        adev->family = AMDGPU_FAMILY_AI;
@@ -1755,6 +1809,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
        case  CHIP_NAVI10:
        case  CHIP_NAVI14:
        case  CHIP_NAVI12:
+       case  CHIP_SIENNA_CICHLID:
+       case  CHIP_NAVY_FLOUNDER:
                adev->family = AMDGPU_FAMILY_NV;
 
                r = nv_set_ip_blocks(adev);
@@ -1768,31 +1824,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 
        amdgpu_amdkfd_device_probe(adev);
 
-       if (amdgpu_sriov_vf(adev)) {
-               /* handle vbios stuff prior full access mode for new handshake */
-               if (adev->virt.req_init_data_ver == 1) {
-                       if (!amdgpu_get_bios(adev)) {
-                               DRM_ERROR("failed to get vbios\n");
-                               return -EINVAL;
-                       }
-
-                       r = amdgpu_atombios_init(adev);
-                       if (r) {
-                               dev_err(adev->dev, "amdgpu_atombios_init failed\n");
-                               amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
-                               return r;
-                       }
-               }
-       }
-
-       /* we need to send REQ_GPU here for legacy handshaker otherwise the vbios
-        * will not be prepared by host for this VF */
-       if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver < 1) {
-               r = amdgpu_virt_request_full_gpu(adev, true);
-               if (r)
-                       return r;
-       }
-
        adev->pm.pp_feature = amdgpu_pp_feature_mask;
        if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
                adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
@@ -1824,10 +1855,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
                        if (r)
                                return r;
 
-                       /* skip vbios handling for new handshake */
-                       if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver == 1)
-                               continue;
-
                        /* Read BIOS */
                        if (!amdgpu_get_bios(adev))
                                return -EINVAL;
@@ -1954,12 +1981,6 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
        if (r)
                return r;
 
-       if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver > 0) {
-               r = amdgpu_virt_request_full_gpu(adev, true);
-               if (r)
-                       return -EAGAIN;
-       }
-
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
@@ -2308,6 +2329,9 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 {
        int i, r;
 
+       if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
+               amdgpu_virt_release_ras_err_handler_data(adev);
+
        amdgpu_ras_pre_fini(adev);
 
        if (adev->gmc.xgmi.num_physical_nodes > 1)
@@ -2438,18 +2462,21 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
+
                /* displays are handled separately */
-               if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
-                       /* XXX handle errors */
-                       r = adev->ip_blocks[i].version->funcs->suspend(adev);
-                       /* XXX handle errors */
-                       if (r) {
-                               DRM_ERROR("suspend of IP block <%s> failed %d\n",
-                                         adev->ip_blocks[i].version->funcs->name, r);
-                               return r;
-                       }
-                       adev->ip_blocks[i].status.hw = false;
+               if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
+                       continue;
+
+               /* XXX handle errors */
+               r = adev->ip_blocks[i].version->funcs->suspend(adev);
+               /* XXX handle errors */
+               if (r) {
+                       DRM_ERROR("suspend of IP block <%s> failed %d\n",
+                                 adev->ip_blocks[i].version->funcs->name, r);
+                       return r;
                }
+
+               adev->ip_blocks[i].status.hw = false;
        }
 
        return 0;
@@ -2547,6 +2574,9 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
                AMD_IP_BLOCK_TYPE_IH,
        };
 
+       for (i = 0; i < adev->num_ip_blocks; i++)
+               adev->ip_blocks[i].status.hw = false;
+
        for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
                int j;
                struct amdgpu_ip_block *block;
@@ -2554,7 +2584,6 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
                for (j = 0; j < adev->num_ip_blocks; j++) {
                        block = &adev->ip_blocks[j];
 
-                       block->status.hw = false;
                        if (block->version->type != ip_order[i] ||
                                !block->status.valid)
                                continue;
@@ -2778,6 +2807,10 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
        case CHIP_NAVI14:
        case CHIP_NAVI12:
        case CHIP_RENOIR:
+#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+       case CHIP_SIENNA_CICHLID:
+       case CHIP_NAVY_FLOUNDER:
 #endif
                return amdgpu_dc != 0;
 #endif
@@ -3036,6 +3069,17 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        adev->gfx.gfx_off_req_count = 1;
        adev->pm.ac_power = power_supply_is_system_supplied() > 0;
 
+       atomic_set(&adev->throttling_logging_enabled, 1);
+       /*
+        * If throttling continues, logging will be performed every minute
+        * to avoid log flooding. "-1" is subtracted since the thermal
+        * throttling interrupt comes every second. Thus, the total logging
+        * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
+        * for throttling interrupt) = 60 seconds.
+        */
+       ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
+       ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
+
        /* Registers mapping */
        /* TODO: block userspace mapping of io register */
        if (adev->asic_type >= CHIP_BONAIRE) {
@@ -3274,6 +3318,9 @@ fence_driver_init:
        queue_delayed_work(system_wq, &adev->delayed_init_work,
                           msecs_to_jiffies(AMDGPU_RESUME_MS));
 
+       if (amdgpu_sriov_vf(adev))
+               flush_delayed_work(&adev->delayed_init_work);
+
        r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
        if (r) {
                dev_err(adev->dev, "Could not create amdgpu device attr\n");
@@ -3330,10 +3377,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
                amdgpu_pm_sysfs_fini(adev);
        amdgpu_fbdev_fini(adev);
        r = amdgpu_device_ip_fini(adev);
-       if (adev->firmware.gpu_info_fw) {
-               release_firmware(adev->firmware.gpu_info_fw);
-               adev->firmware.gpu_info_fw = NULL;
-       }
+       release_firmware(adev->firmware.gpu_info_fw);
+       adev->firmware.gpu_info_fw = NULL;
        adev->accel_working = false;
        /* free i2c buses */
        if (!amdgpu_device_has_dc_support(adev))
@@ -3365,7 +3410,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
        sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
        if (IS_ENABLED(CONFIG_PERF_EVENTS))
                amdgpu_pmu_fini(adev);
-       if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
+       if (adev->discovery_bin)
                amdgpu_discovery_fini(adev);
 }
 
@@ -3377,7 +3422,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
  * amdgpu_device_suspend - initiate device suspend
  *
  * @dev: drm dev pointer
- * @suspend: suspend state
  * @fbcon : notify the fbdev of suspend
  *
  * Puts the hw in the suspend state (all asics).
@@ -3474,7 +3518,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
  * amdgpu_device_resume - initiate device resume
  *
  * @dev: drm dev pointer
- * @resume: resume state
  * @fbcon : notify the fbdev of resume
  *
  * Bring the hw back to operating state (all asics).
@@ -3839,7 +3882,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 
        amdgpu_virt_init_data_exchange(adev);
        /* we need recover gart prior to run SMC/CP/SDMA resume */
-       amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
+       amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
 
        r = amdgpu_device_fw_loading(adev);
        if (r)
@@ -3905,6 +3948,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
                case CHIP_NAVI10:
                case CHIP_NAVI14:
                case CHIP_NAVI12:
+               case CHIP_SIENNA_CICHLID:
                        break;
                default:
                        goto disabled;
@@ -4037,8 +4081,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
                                        amdgpu_inc_vram_lost(tmp_adev);
                                }
 
-                               r = amdgpu_gtt_mgr_recover(
-                                       &tmp_adev->mman.bdev.man[TTM_PL_TT]);
+                               r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
                                if (r)
                                        goto out;
 
@@ -4210,18 +4253,19 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
        struct amdgpu_hive_info *hive = NULL;
        struct amdgpu_device *tmp_adev = NULL;
        int i, r = 0;
-       bool in_ras_intr = amdgpu_ras_intr_triggered();
-       bool use_baco =
-               (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ?
-               true : false;
+       bool need_emergency_restart = false;
        bool audio_suspended = false;
 
+       /**
+        * Special case: RAS triggered and full reset isn't supported
+        */
+       need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
+
        /*
         * Flush RAM to disk so that after reboot
         * the user can read log and see why the system rebooted.
         */
-       if (in_ras_intr && !use_baco && amdgpu_ras_get_context(adev)->reboot) {
-
+       if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
                DRM_WARN("Emergency reboot.");
 
                ksys_sync_helper();
@@ -4229,7 +4273,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
        }
 
        dev_info(adev->dev, "GPU %s begin!\n",
-               (in_ras_intr && !use_baco) ? "jobs stop":"reset");
+               need_emergency_restart ? "jobs stop":"reset");
 
        /*
         * Here we trylock to avoid chain of resets executing from
@@ -4301,7 +4345,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                amdgpu_fbdev_set_suspend(tmp_adev, 1);
 
                /* disable ras on ALL IPs */
-               if (!(in_ras_intr && !use_baco) &&
+               if (!need_emergency_restart &&
                      amdgpu_device_ip_need_full_reset(tmp_adev))
                        amdgpu_ras_suspend(tmp_adev);
 
@@ -4313,12 +4357,12 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
                        drm_sched_stop(&ring->sched, job ? &job->base : NULL);
 
-                       if (in_ras_intr && !use_baco)
+                       if (need_emergency_restart)
                                amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
                }
        }
 
-       if (in_ras_intr && !use_baco)
+       if (need_emergency_restart)
                goto skip_sched_resume;
 
        /*
@@ -4395,7 +4439,7 @@ skip_hw_reset:
 skip_sched_resume:
        list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
                /*unlock kfd: SRIOV would do it separately */
-               if (!(in_ras_intr && !use_baco) && !amdgpu_sriov_vf(tmp_adev))
+               if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
                        amdgpu_amdkfd_post_reset(tmp_adev);
                if (audio_suspended)
                        amdgpu_device_resume_display_audio(tmp_adev);
This page took 0.051348 seconds and 4 git commands to generate.