Merge tag 'amd-drm-next-6.5-2023-06-09' of https://gitlab.freedesktop.org/agd5f/linux...
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 70c6099353b828703a89f45547d55ae8380a8d24..a33d4bc34cee746cdee7e70103190fee0aa9f2cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -448,8 +448,8 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
 
                        ring->mqd_size = mqd_size;
                        /* prepare MQD backup */
-                       adev->gfx.mec.mqd_backup[j + xcc_id * adev->gfx.num_compute_rings] = kmalloc(mqd_size, GFP_KERNEL);
-                       if (!adev->gfx.mec.mqd_backup[j + xcc_id * adev->gfx.num_compute_rings])
+                       adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL);
+                       if (!adev->gfx.mec.mqd_backup[j])
                                dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
                }
        }
@@ -509,7 +509,7 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                j = i + xcc_id * adev->gfx.num_compute_rings;
                kiq->pmf->kiq_unmap_queues(kiq_ring,
-                                          &adev->gfx.compute_ring[i],
+                                          &adev->gfx.compute_ring[j],
                                           RESET_QUEUES, 0, 0);
        }
 
@@ -541,7 +541,7 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
                for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
                        j = i + xcc_id * adev->gfx.num_gfx_rings;
                        kiq->pmf->kiq_unmap_queues(kiq_ring,
-                                                  &adev->gfx.gfx_ring[i],
+                                                  &adev->gfx.gfx_ring[j],
                                                   PREEMPT_QUEUES, 0, 0);
                }
        }
@@ -593,6 +593,8 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
 
        DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
                                                        kiq_ring->queue);
+       amdgpu_device_flush_hdp(adev, NULL);
+
        spin_lock(&kiq->ring_lock);
        r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
                                        adev->gfx.num_compute_rings +
@@ -630,6 +632,8 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
        if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
                return -EINVAL;
 
+       amdgpu_device_flush_hdp(adev, NULL);
+
        spin_lock(&kiq->ring_lock);
        /* No need to map kcq on the slave */
        if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
@@ -644,7 +648,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
                for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
                        j = i + xcc_id * adev->gfx.num_gfx_rings;
                        kiq->pmf->kiq_map_queues(kiq_ring,
-                                                &adev->gfx.gfx_ring[i]);
+                                                &adev->gfx.gfx_ring[j]);
                }
        }
 
@@ -884,6 +888,25 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
        return 0;
 }
 
+void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
+               void *ras_error_status,
+               void (*func)(struct amdgpu_device *adev, void *ras_error_status,
+                               int xcc_id))
+{
+       int i;
+       int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
+       uint32_t xcc_mask = GENMASK(num_xcc - 1, 0);
+       struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+       if (err_data) {
+               err_data->ue_count = 0;
+               err_data->ce_count = 0;
+       }
+
+       for_each_inst(i, xcc_mask)
+               func(adev, ras_error_status, i);
+}
+
 uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
 {
        signed long r, cnt = 0;
@@ -1175,50 +1198,11 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        int mode;
-       char *partition_mode;
-
-       mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr);
-
-       switch (mode) {
-       case AMDGPU_SPX_PARTITION_MODE:
-               partition_mode = "SPX";
-               break;
-       case AMDGPU_DPX_PARTITION_MODE:
-               partition_mode = "DPX";
-               break;
-       case AMDGPU_TPX_PARTITION_MODE:
-               partition_mode = "TPX";
-               break;
-       case AMDGPU_QPX_PARTITION_MODE:
-               partition_mode = "QPX";
-               break;
-       case AMDGPU_CPX_PARTITION_MODE:
-               partition_mode = "CPX";
-               break;
-       default:
-               partition_mode = "UNKNOWN";
-               break;
-       }
-
-       return sysfs_emit(buf, "%s\n", partition_mode);
-}
-
-static ssize_t amdgpu_gfx_get_current_memory_partition(struct device *dev,
-                                               struct device_attribute *addr,
-                                               char *buf)
-{
-       struct drm_device *ddev = dev_get_drvdata(dev);
-       struct amdgpu_device *adev = drm_to_adev(ddev);
-       enum amdgpu_memory_partition mode;
-       static const char *partition_modes[] = {
-               "UNKNOWN", "NPS1", "NPS2", "NPS4", "NPS8"
-       };
-       BUILD_BUG_ON(ARRAY_SIZE(partition_modes) <= AMDGPU_NPS8_PARTITION_MODE);
 
-       mode = min((int)adev->gfx.funcs->query_mem_partition_mode(adev),
-               AMDGPU_NPS8_PARTITION_MODE);
+       mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
+                                              AMDGPU_XCP_FL_NONE);
 
-       return sysfs_emit(buf, "%s\n", partition_modes[mode]);
+       return sysfs_emit(buf, "%s\n", amdgpu_gfx_compute_mode_desc(mode));
 }
 
 static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
@@ -1304,9 +1288,6 @@ static DEVICE_ATTR(current_compute_partition, S_IRUGO | S_IWUSR,
 static DEVICE_ATTR(available_compute_partition, S_IRUGO,
                   amdgpu_gfx_get_available_compute_partition, NULL);
 
-static DEVICE_ATTR(current_memory_partition, S_IRUGO,
-                  amdgpu_gfx_get_current_memory_partition, NULL);
-
 int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
 {
        int r;
@@ -1316,19 +1297,12 @@ int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
                return r;
 
        r = device_create_file(adev->dev, &dev_attr_available_compute_partition);
-       if (r)
-               return r;
 
-       r = device_create_file(adev->dev, &dev_attr_current_memory_partition);
-       if (r)
-               return r;
-
-       return 0;
+       return r;
 }
 
 void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
 {
        device_remove_file(adev->dev, &dev_attr_current_compute_partition);
        device_remove_file(adev->dev, &dev_attr_available_compute_partition);
-       device_remove_file(adev->dev, &dev_attr_current_memory_partition);
 }