diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a649e40fd96f7333afd1443a0f9e30e8abb0b472..4204cda680f5668d6d0c284e285e5852c3b17c22 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -81,6 +81,7 @@ MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/sienna_cichlid_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/navy_flounder_gpu_info.bin");
 
 #define AMDGPU_RESUME_MS               2000
 
@@ -114,6 +115,7 @@ const char *amdgpu_asic_name[] = {
        "NAVI14",
        "NAVI12",
        "SIENNA_CICHLID",
+       "NAVY_FLOUNDER",
        "LAST",
 };
 
@@ -301,10 +303,10 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 }
 
 /*
- * device register access helper functions.
+ * MMIO register access helper functions.
  */
 /**
- * amdgpu_device_rreg - read a register
+ * amdgpu_mm_rreg - read a memory mapped IO register
  *
  * @adev: amdgpu_device pointer
  * @reg: dword aligned register offset
@@ -312,8 +314,8 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
  *
  * Returns the 32 bit value from the offset specified.
  */
-uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg,
-                           uint32_t acc_flags)
+uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
+                       uint32_t acc_flags)
 {
        uint32_t ret;
 
@@ -322,9 +324,15 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg,
 
        if ((reg * 4) < adev->rmmio_size)
                ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
-       else
-               ret = adev->pcie_rreg(adev, (reg * 4));
-       trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
+       else {
+               unsigned long flags;
+
+               spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+               writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
+               ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
+               spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+       }
+       trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
        return ret;
 }
 
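The else branch above (and the matching one in the write helper below) replaces the pcie_rreg/pcie_wreg indirection with the classic MM_INDEX/MM_DATA pair: for offsets beyond the directly mapped MMIO aperture, the byte offset is first written to mmMM_INDEX and the value is then read from (or written to) mmMM_DATA, with mmio_idx_lock serializing the two-step sequence against other CPUs. A minimal sketch of the read side, using only names visible in this hunk (the standalone helper itself is illustrative):

    /* Indirect register read through an index/data pair. The spinlock is
     * mandatory: another CPU writing mmMM_INDEX between our index write
     * and data read would make us read the wrong register. */
    static uint32_t indexed_rreg(struct amdgpu_device *adev, uint32_t reg)
    {
            unsigned long flags;
            uint32_t val;

            spin_lock_irqsave(&adev->mmio_idx_lock, flags);
            /* select the target register by its byte offset */
            writel(reg * 4, ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
            /* fetch its current value through the data window */
            val = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
            spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

            return val;
    }
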
@@ -370,19 +378,24 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
                BUG();
 }
 
-void static inline amdgpu_device_wreg_no_kiq(struct amdgpu_device *adev, uint32_t reg,
-                                            uint32_t v, uint32_t acc_flags)
+void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags)
 {
-       trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
+       trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
 
        if ((reg * 4) < adev->rmmio_size)
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
-       else
-               adev->pcie_wreg(adev, (reg * 4), v);
+       else {
+               unsigned long flags;
+
+               spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+               writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
+               writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
+               spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+       }
 }
 
 /**
- * amdgpu_device_wreg - write to a register
+ * amdgpu_mm_wreg - write to a memory mapped IO register
  *
  * @adev: amdgpu_device pointer
  * @reg: dword aligned register offset
@@ -391,13 +404,13 @@ void static inline amdgpu_device_wreg_no_kiq(struct amdgpu_device *adev, uint32_
  *
  * Writes the value specified to the offset specified.
  */
-void amdgpu_device_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
-                       uint32_t acc_flags)
+void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+                   uint32_t acc_flags)
 {
        if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
                return amdgpu_kiq_wreg(adev, reg, v);
 
-       amdgpu_device_wreg_no_kiq(adev, reg, v, acc_flags);
+       amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
 }
 
 /*
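Note the dispatch above: when running as an SR-IOV VF at runtime (amdgpu_sriov_runtime()), writes are funneled through the KIQ ring so the host can mediate them, unless the caller passes AMDGPU_REGS_NO_KIQ in acc_flags to force direct MMIO. A sketch of the two caller-side forms; WREG32/WREG32_NO_KIQ are the usual amdgpu wrapper macros, though their exact expansion is assumed here, and mmHYPOTHETICAL_REG is a placeholder offset:

    WREG32(mmHYPOTHETICAL_REG, val);         /* may detour through KIQ on a VF */
    WREG32_NO_KIQ(mmHYPOTHETICAL_REG, val);  /* always direct MMIO, e.g. for
                                              * paths that run before the KIQ
                                              * ring is up */
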
@@ -416,7 +429,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t
                        return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
        }
 
-       amdgpu_device_wreg_no_kiq(adev, reg, v, acc_flags);
+       amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
 }
 
 /**
@@ -1621,6 +1634,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
        case CHIP_SIENNA_CICHLID:
                chip_name = "sienna_cichlid";
                break;
+       case CHIP_NAVY_FLOUNDER:
+               chip_name = "navy_flounder";
+               break;
        }
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
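The chip_name chosen in this switch feeds the snprintf above, so every new case must be matched by a MODULE_FIRMWARE() line like the navy_flounder one added at the top of this diff; otherwise initramfs tooling will not know to bundle the blob. A short sketch of the pairing, using only paths visible in this diff (request_firmware() is the standard kernel API, and the gpu_info_fw destination field is assumed):

    MODULE_FIRMWARE("amdgpu/navy_flounder_gpu_info.bin"); /* advertises the dependency */

    /* ...later, the same path is built and requested at runtime: */
    snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", "navy_flounder");
    int err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
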
@@ -1722,6 +1738,12 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 
        amdgpu_device_enable_virtual_display(adev);
 
+       if (amdgpu_sriov_vf(adev)) {
+               r = amdgpu_virt_request_full_gpu(adev, true);
+               if (r)
+                       return r;
+       }
+
        switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_SI
        case CHIP_VERDE:
@@ -1788,6 +1810,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
        case  CHIP_NAVI14:
        case  CHIP_NAVI12:
        case  CHIP_SIENNA_CICHLID:
+       case  CHIP_NAVY_FLOUNDER:
                adev->family = AMDGPU_FAMILY_NV;
 
                r = nv_set_ip_blocks(adev);
@@ -1801,31 +1824,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 
        amdgpu_amdkfd_device_probe(adev);
 
-       if (amdgpu_sriov_vf(adev)) {
-               /* handle vbios stuff prior full access mode for new handshake */
-               if (adev->virt.req_init_data_ver == 1) {
-                       if (!amdgpu_get_bios(adev)) {
-                               DRM_ERROR("failed to get vbios\n");
-                               return -EINVAL;
-                       }
-
-                       r = amdgpu_atombios_init(adev);
-                       if (r) {
-                               dev_err(adev->dev, "amdgpu_atombios_init failed\n");
-                               amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
-                               return r;
-                       }
-               }
-       }
-
-       /* we need to send REQ_GPU here for legacy handshaker otherwise the vbios
-        * will not be prepared by host for this VF */
-       if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver < 1) {
-               r = amdgpu_virt_request_full_gpu(adev, true);
-               if (r)
-                       return r;
-       }
-
        adev->pm.pp_feature = amdgpu_pp_feature_mask;
        if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
                adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
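This removal pairs with the @@ -1722 hunk above: instead of choosing between a legacy and a new handshake at two different points, the VF now requests full GPU access once, before any VBIOS or IP-block work. A minimal sketch of the resulting bracket; amdgpu_virt_release_full_gpu() exists in amdgpu_virt.c, but its placement here and do_init_work() are illustrative:

    static int example_early_init(struct amdgpu_device *adev)
    {
            int r;

            /* take exclusive GPU access from the hypervisor up front */
            if (amdgpu_sriov_vf(adev)) {
                    r = amdgpu_virt_request_full_gpu(adev, true);
                    if (r)
                            return r;
            }

            r = do_init_work(adev);   /* hypothetical: vbios, IP blocks... */

            /* hand the GPU back once initialization is done (or failed) */
            if (amdgpu_sriov_vf(adev))
                    amdgpu_virt_release_full_gpu(adev, true);

            return r;
    }
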
@@ -1857,10 +1855,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
                        if (r)
                                return r;
 
-                       /* skip vbios handling for new handshake */
-                       if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver == 1)
-                               continue;
-
                        /* Read BIOS */
                        if (!amdgpu_get_bios(adev))
                                return -EINVAL;
@@ -1987,12 +1981,6 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
        if (r)
                return r;
 
-       if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver > 0) {
-               r = amdgpu_virt_request_full_gpu(adev, true);
-               if (r)
-                       return -EAGAIN;
-       }
-
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
@@ -2474,18 +2462,21 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
+
                /* displays are handled separately */
-               if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
-                       /* XXX handle errors */
-                       r = adev->ip_blocks[i].version->funcs->suspend(adev);
-                       /* XXX handle errors */
-                       if (r) {
-                               DRM_ERROR("suspend of IP block <%s> failed %d\n",
-                                         adev->ip_blocks[i].version->funcs->name, r);
-                               return r;
-                       }
-                       adev->ip_blocks[i].status.hw = false;
+               if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
+                       continue;
+
+               /* XXX handle errors */
+               r = adev->ip_blocks[i].version->funcs->suspend(adev);
+               /* XXX handle errors */
+               if (r) {
+                       DRM_ERROR("suspend of IP block <%s> failed %d\n",
+                                 adev->ip_blocks[i].version->funcs->name, r);
+                       return r;
                }
+
+               adev->ip_blocks[i].status.hw = false;
        }
 
        return 0;
@@ -2583,6 +2574,9 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
                AMD_IP_BLOCK_TYPE_IH,
        };
 
+       for (i = 0; i < adev->num_ip_blocks; i++)
+               adev->ip_blocks[i].status.hw = false;
+
        for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
                int j;
                struct amdgpu_ip_block *block;
@@ -2590,7 +2584,6 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
                for (j = 0; j < adev->num_ip_blocks; j++) {
                        block = &adev->ip_blocks[j];
 
-                       block->status.hw = false;
                        if (block->version->type != ip_order[i] ||
                                !block->status.valid)
                                continue;
@@ -2817,6 +2810,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 #endif
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
        case CHIP_SIENNA_CICHLID:
+       case CHIP_NAVY_FLOUNDER:
 #endif
                return amdgpu_dc != 0;
 #endif
@@ -3324,6 +3318,9 @@ fence_driver_init:
        queue_delayed_work(system_wq, &adev->delayed_init_work,
                           msecs_to_jiffies(AMDGPU_RESUME_MS));
 
+       if (amdgpu_sriov_vf(adev))
+               flush_delayed_work(&adev->delayed_init_work);
+
        r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
        if (r) {
                dev_err(adev->dev, "Could not create amdgpu device attr\n");
@@ -3885,7 +3882,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 
        amdgpu_virt_init_data_exchange(adev);
        /* we need recover gart prior to run SMC/CP/SDMA resume */
-       amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
+       amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
 
        r = amdgpu_device_fw_loading(adev);
        if (r)
@@ -3951,6 +3948,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
                case CHIP_NAVI10:
                case CHIP_NAVI14:
                case CHIP_NAVI12:
+               case CHIP_SIENNA_CICHLID:
                        break;
                default:
                        goto disabled;
@@ -4083,8 +4081,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
                                        amdgpu_inc_vram_lost(tmp_adev);
                                }
 
-                               r = amdgpu_gtt_mgr_recover(
-                                       &tmp_adev->mman.bdev.man[TTM_PL_TT]);
+                               r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
                                if (r)
                                        goto out;
 
@@ -4256,18 +4253,19 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
        struct amdgpu_hive_info *hive = NULL;
        struct amdgpu_device *tmp_adev = NULL;
        int i, r = 0;
-       bool in_ras_intr = amdgpu_ras_intr_triggered();
-       bool use_baco =
-               (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ?
-               true : false;
+       bool need_emergency_restart = false;
        bool audio_suspended = false;
 
+       /**
+        * Special case: RAS triggered and full reset isn't supported
+        */
+       need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
+
        /*
         * Flush RAM to disk so that after reboot
         * the user can read log and see why the system rebooted.
         */
-       if (in_ras_intr && !use_baco && amdgpu_ras_get_context(adev)->reboot) {
-
+       if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
                DRM_WARN("Emergency reboot.");
 
                ksys_sync_helper();
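The helper introduced above folds the old in_ras_intr && !use_baco pair into one named predicate. Reconstructed purely from the expression this hunk deletes, it would look roughly like the following; the real amdgpu_ras_need_emergency_restart() in amdgpu_ras.c may check more:

    /* Emergency restart: a RAS fatal interrupt fired and the ASIC has no
     * full-reset (BACO) path to recover with. Derived from the replaced
     * condition, not copied from the actual helper body. */
    bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
    {
            return amdgpu_ras_intr_triggered() &&
                   amdgpu_asic_reset_method(adev) != AMD_RESET_METHOD_BACO;
    }
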
@@ -4275,7 +4273,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
        }
 
        dev_info(adev->dev, "GPU %s begin!\n",
-               (in_ras_intr && !use_baco) ? "jobs stop":"reset");
+               need_emergency_restart ? "jobs stop":"reset");
 
        /*
         * Here we trylock to avoid chain of resets executing from
@@ -4347,7 +4345,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                amdgpu_fbdev_set_suspend(tmp_adev, 1);
 
                /* disable ras on ALL IPs */
-               if (!(in_ras_intr && !use_baco) &&
+               if (!need_emergency_restart &&
                      amdgpu_device_ip_need_full_reset(tmp_adev))
                        amdgpu_ras_suspend(tmp_adev);
 
@@ -4359,12 +4357,12 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
                        drm_sched_stop(&ring->sched, job ? &job->base : NULL);
 
-                       if (in_ras_intr && !use_baco)
+                       if (need_emergency_restart)
                                amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
                }
        }
 
-       if (in_ras_intr && !use_baco)
+       if (need_emergency_restart)
                goto skip_sched_resume;
 
        /*
@@ -4441,7 +4439,7 @@ skip_hw_reset:
 skip_sched_resume:
        list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
                /*unlock kfd: SRIOV would do it separately */
-               if (!(in_ras_intr && !use_baco) && !amdgpu_sriov_vf(tmp_adev))
+               if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
                        amdgpu_amdkfd_post_reset(tmp_adev);
                if (audio_suspended)
                        amdgpu_device_resume_display_audio(tmp_adev);