diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d5f78179b2b6efa79818a666553ba3b6f5923371..7eeaf0aa7f8121fc59dcd30e48a00dc9750d5e5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -41,6 +41,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/amdgpu_drm.h>
+#include <linux/device.h>
 #include <linux/vgaarb.h>
 #include <linux/vga_switcheroo.h>
 #include <linux/efi.h>
@@ -72,6 +73,7 @@
 #include "amdgpu_pmu.h"
 #include "amdgpu_fru_eeprom.h"
 #include "amdgpu_reset.h"
+#include "amdgpu_virt.h"
 
 #include <linux/suspend.h>
 #include <drm/task_barrier.h>
@@ -471,7 +473,7 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
                if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
                    amdgpu_sriov_runtime(adev) &&
                    down_read_trylock(&adev->reset_domain->sem)) {
-                       ret = amdgpu_kiq_rreg(adev, reg);
+                       ret = amdgpu_kiq_rreg(adev, reg, 0);
                        up_read(&adev->reset_domain->sem);
                } else {
                        ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
@@ -508,6 +510,49 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
        BUG();
 }
 
+
+/**
+ * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
+ *
+ * @adev: amdgpu_device pointer
+ * @reg: dword aligned register offset
+ * @acc_flags: access flags which require special behavior
+ * @xcc_id: xcc accelerated compute core id
+ *
+ * Returns the 32 bit value from the offset specified.
+ */
+uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
+                               uint32_t reg, uint32_t acc_flags,
+                               uint32_t xcc_id)
+{
+       uint32_t ret, rlcg_flag;
+
+       if (amdgpu_device_skip_hw_access(adev))
+               return 0;
+
+       if ((reg * 4) < adev->rmmio_size) {
+               if (amdgpu_sriov_vf(adev) &&
+                   !amdgpu_sriov_runtime(adev) &&
+                   adev->gfx.rlc.rlcg_reg_access_supported &&
+                   amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
+                                                        GC_HWIP, false,
+                                                        &rlcg_flag)) {
+                       ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, xcc_id);
+               } else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
+                   amdgpu_sriov_runtime(adev) &&
+                   down_read_trylock(&adev->reset_domain->sem)) {
+                       ret = amdgpu_kiq_rreg(adev, reg, xcc_id);
+                       up_read(&adev->reset_domain->sem);
+               } else {
+                       ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
+               }
+       } else {
+               ret = adev->pcie_rreg(adev, reg * 4);
+       }
+
+       return ret;
+}
+
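For reference, a minimal usage sketch of the new per-XCC read helper (the register offset and instance number below are illustrative assumptions, not values taken from this patch):

        /* read a GC register on XCC instance 1; acc_flags of 0 lets the helper
         * choose between RLCG, KIQ and direct MMIO access as appropriate
         */
        uint32_t val = amdgpu_device_xcc_rreg(adev, reg_offset, 0, 1);
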
 /*
  * MMIO register write with bytes helper functions
  * @offset:bytes offset from MMIO start
@@ -555,7 +600,7 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
                if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
                    amdgpu_sriov_runtime(adev) &&
                    down_read_trylock(&adev->reset_domain->sem)) {
-                       amdgpu_kiq_wreg(adev, reg, v);
+                       amdgpu_kiq_wreg(adev, reg, v, 0);
                        up_read(&adev->reset_domain->sem);
                } else {
                        writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
@@ -596,6 +641,47 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
        }
 }
 
+/**
+ * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
+ *
+ * @adev: amdgpu_device pointer
+ * @reg: dword aligned register offset
+ * @v: 32 bit value to write to the register
+ * @acc_flags: access flags which require special behavior
+ * @xcc_id: xcc accelerated compute core id
+ *
+ * Writes the value specified to the offset specified.
+ */
+void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
+                       uint32_t reg, uint32_t v,
+                       uint32_t acc_flags, uint32_t xcc_id)
+{
+       uint32_t rlcg_flag;
+
+       if (amdgpu_device_skip_hw_access(adev))
+               return;
+
+       if ((reg * 4) < adev->rmmio_size) {
+               if (amdgpu_sriov_vf(adev) &&
+                   !amdgpu_sriov_runtime(adev) &&
+                   adev->gfx.rlc.rlcg_reg_access_supported &&
+                   amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
+                                                        GC_HWIP, true,
+                                                        &rlcg_flag)) {
+                       amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, xcc_id);
+               } else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
+                   amdgpu_sriov_runtime(adev) &&
+                   down_read_trylock(&adev->reset_domain->sem)) {
+                       amdgpu_kiq_wreg(adev, reg, v, xcc_id);
+                       up_read(&adev->reset_domain->sem);
+               } else {
+                       writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
+               }
+       } else {
+               adev->pcie_wreg(adev, reg * 4, v);
+       }
+}
+
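The corresponding per-XCC write would look like this (again a sketch with assumed values; AMDGPU_REGS_NO_KIQ is shown only to illustrate how acc_flags steers the access path):

        /* write a GC register on XCC instance 1, explicitly bypassing the KIQ path */
        amdgpu_device_xcc_wreg(adev, reg_offset, value, AMDGPU_REGS_NO_KIQ, 1);
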
 /**
  * amdgpu_device_indirect_rreg - read an indirect register
  *
@@ -1073,6 +1159,8 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
            amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
                amdgpu_psp_wait_for_bootloader(adev);
                ret = amdgpu_atomfirmware_asic_init(adev, true);
+               /* TODO: check the return val and stop device initialization if boot fails */
+               amdgpu_psp_query_boot_status(adev);
                return ret;
        } else {
                return amdgpu_atom_asic_init(adev->mode_info.atom_context);
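The TODO above could plausibly be resolved by propagating the boot-status result; a hedged sketch, assuming amdgpu_psp_query_boot_status() reports boot failure through a non-zero return value (this patch does not yet rely on that):

        ret = amdgpu_atomfirmware_asic_init(adev, true);
        if (ret)
                return ret;
        /* stop device initialization if the PSP reports a failed boot */
        return amdgpu_psp_query_boot_status(adev);
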
@@ -2223,7 +2311,6 @@ out:
  */
 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 {
-       struct drm_device *dev = adev_to_drm(adev);
        struct pci_dev *parent;
        int i, r;
        bool total;
@@ -2294,7 +2381,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
            (amdgpu_is_atpx_hybrid() ||
             amdgpu_has_atpx_dgpu_power_cntl()) &&
            ((adev->flags & AMD_IS_APU) == 0) &&
-           !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
+           !dev_is_removable(&adev->pdev->dev))
                adev->flags |= AMD_IS_PX;
 
        if (!(adev->flags & AMD_IS_APU)) {
@@ -2497,6 +2584,18 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
                                  ring->name);
                        return r;
                }
+               r = amdgpu_uvd_entity_init(adev, ring);
+               if (r) {
+                       DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
+                                 ring->name);
+                       return r;
+               }
+               r = amdgpu_vce_entity_init(adev, ring);
+               if (r) {
+                       DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
+                                 ring->name);
+                       return r;
+               }
        }
 
        amdgpu_xcp_update_partition_sched_list(adev);
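For orientation, the new UVD/VCE entity helpers follow the usual DRM scheduler pattern; a minimal sketch of that pattern (illustrative only, not the actual helper bodies, and "entity" stands for a persistent struct drm_sched_entity owned by the IP block):

        struct drm_gpu_scheduler *sched = &ring->sched;

        /* bind the scheduling entity to this ring's scheduler at normal priority */
        r = drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
                                  &sched, 1, NULL);
        if (r)
                return r;
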
@@ -3962,13 +4061,23 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                                }
                        }
                } else {
-                       tmp = amdgpu_reset_method;
-                       /* It should do a default reset when loading or reloading the driver,
-                        * regardless of the module parameter reset_method.
-                        */
-                       amdgpu_reset_method = AMD_RESET_METHOD_NONE;
-                       r = amdgpu_asic_reset(adev);
-                       amdgpu_reset_method = tmp;
+                       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+                       case IP_VERSION(13, 0, 0):
+                       case IP_VERSION(13, 0, 7):
+                       case IP_VERSION(13, 0, 10):
+                               r = psp_gpu_reset(adev);
+                               break;
+                       default:
+                               tmp = amdgpu_reset_method;
+                               /* It should do a default reset when loading or reloading the driver,
+                                * regardless of the module parameter reset_method.
+                                */
+                               amdgpu_reset_method = AMD_RESET_METHOD_NONE;
+                               r = amdgpu_asic_reset(adev);
+                               amdgpu_reset_method = tmp;
+                               break;
+                       }
+
                        if (r) {
                                dev_err(adev->dev, "asic reset on init failed\n");
                                goto failed;
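As an aside on the version check driving the new switch: amdgpu_ip_version() returns the same packed value that the IP_VERSION() macro builds, so the MP1 (SMU) instances above are matched by plain integer equality; a hedged illustration (the log message is made up):

        if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10))
                dev_info(adev->dev, "SMU v13.0.10: using PSP-based reset on init\n");
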
@@ -4132,7 +4241,7 @@ fence_driver_init:
 
        px = amdgpu_device_supports_px(ddev);
 
-       if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
+       if (px || (!dev_is_removable(&adev->pdev->dev) &&
                                apple_gmux_detect(NULL, NULL)))
                vga_switcheroo_register_client(adev->pdev,
                                               &amdgpu_switcheroo_ops, px);
@@ -4282,7 +4391,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
 
        px = amdgpu_device_supports_px(adev_to_drm(adev));
 
-       if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
+       if (px || (!dev_is_removable(&adev->pdev->dev) &&
                                apple_gmux_detect(NULL, NULL)))
                vga_switcheroo_unregister_client(adev->pdev);
 
@@ -4474,19 +4583,18 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
        }
        amdgpu_fence_driver_hw_init(adev);
 
-       r = amdgpu_device_ip_late_init(adev);
-       if (r)
-               goto exit;
-
-       queue_delayed_work(system_wq, &adev->delayed_init_work,
-                          msecs_to_jiffies(AMDGPU_RESUME_MS));
-
        if (!adev->in_s0ix) {
                r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
                if (r)
                        goto exit;
        }
 
+       r = amdgpu_device_ip_late_init(adev);
+       if (r)
+               goto exit;
+
+       queue_delayed_work(system_wq, &adev->delayed_init_work,
+                          msecs_to_jiffies(AMDGPU_RESUME_MS));
 exit:
        if (amdgpu_sriov_vf(adev)) {
                amdgpu_virt_init_data_exchange(adev);
@@ -5566,10 +5674,6 @@ skip_hw_reset:
                        drm_sched_start(&ring->sched, true);
                }
 
-               if (adev->enable_mes &&
-                   amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3))
-                       amdgpu_mes_self_test(tmp_adev);
-
                if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
                        drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
 