#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"
+#include "amdgpu_xcp.h"
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
adev->gfx.mec.num_queue_per_pipe,
adev->gfx.num_compute_rings);
- int num_xcd = (adev->gfx.num_xcd > 1) ? adev->gfx.num_xcd : 1;
+ int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
if (multipipe_policy) {
/* policy: spread queues evenly across all pipes on MEC1 only.
* For multiple XCCs, just apply the original policy per instance for simplicity. */
- for (j = 0; j < num_xcd; j++) {
+ for (j = 0; j < num_xcc; j++) {
for (i = 0; i < max_queues_per_mec; i++) {
pipe = i % adev->gfx.mec.num_pipe_per_mec;
queue = (i / adev->gfx.mec.num_pipe_per_mec) %
adev->gfx.mec.num_queue_per_pipe;

set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
adev->gfx.mec_bitmap[j].queue_bitmap);
}
}
} else {
/* policy: amdgpu owns all queues in the given pipe */
- for (j = 0; j < num_xcd; j++) {
+ for (j = 0; j < num_xcc; j++) {
for (i = 0; i < max_queues_per_mec; ++i)
set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap);
}
}
- for (j = 0; j < num_xcd; j++) {
+ for (j = 0; j < num_xcc; j++) {
dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
}
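/*
 * Illustrative sketch, not part of the patch: how the multipipe policy
 * above spreads queue slots across pipes. The topology values (4 pipes
 * per MEC, 8 queues per pipe) are assumptions for the demo, not read
 * from hardware.
 */
#include <stdio.h>

static void show_queue_spread(int num_pipe_per_mec, int num_queue_per_pipe,
			      int max_queues)
{
	int i;

	for (i = 0; i < max_queues; i++) {
		int pipe = i % num_pipe_per_mec;
		int queue = (i / num_pipe_per_mec) % num_queue_per_pipe;

		/* consecutive slots rotate through the pipes before a
		 * pipe is reused, matching the policy above */
		printf("slot %2d -> pipe %d queue %d\n", i, pipe, queue);
	}
}

int main(void)
{
	show_queue_spread(4, 8, 8);	/* hypothetical MEC topology */
	return 0;
}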
ring->adev = NULL;
ring->ring_obj = NULL;
ring->use_doorbell = true;
ring->xcc_id = xcc_id;
ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
- if (xcc_id >= 1)
- ring->doorbell_index = adev->doorbell_index.xcc1_kiq_start +
- xcc_id - 1;
- else
- ring->doorbell_index = adev->doorbell_index.kiq;
+ ring->doorbell_index =
+ (adev->doorbell_index.kiq +
+ xcc_id * adev->doorbell_index.xcc_doorbell_range)
+ << 1;
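/*
 * Worked example of the new doorbell computation, using assumed values:
 * KIQ_DOORBELL_BASE and XCC_DOORBELL_RANGE stand in for
 * adev->doorbell_index.kiq and adev->doorbell_index.xcc_doorbell_range.
 * The << 1 doubles the slot number because each 64-bit doorbell spans
 * two 32-bit dword indices.
 */
#include <stdio.h>
#include <stdint.h>

#define KIQ_DOORBELL_BASE	0x10u	/* assumed, for illustration */
#define XCC_DOORBELL_RANGE	0x20u	/* assumed, for illustration */

int main(void)
{
	uint32_t xcc_id;

	for (xcc_id = 0; xcc_id < 4; xcc_id++) {
		uint32_t index = (KIQ_DOORBELL_BASE +
				  xcc_id * XCC_DOORBELL_RANGE) << 1;
		/* e.g. xcc 0 -> 0x20, xcc 1 -> 0x60 */
		printf("xcc %u: kiq doorbell index 0x%x\n", xcc_id, index);
	}
	return 0;
}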
r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id);
if (r)
return r;
ring->mqd_size = mqd_size;
/* prepare MQD backup */
- adev->gfx.mec.mqd_backup[j + xcc_id * adev->gfx.num_compute_rings] = kmalloc(mqd_size, GFP_KERNEL);
- if (!adev->gfx.mec.mqd_backup[j + xcc_id * adev->gfx.num_compute_rings])
+ adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL);
+ if (!adev->gfx.mec.mqd_backup[j])
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
}
}
ring = &kiq->ring;
kfree(kiq->mqd_backup);
- kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
amdgpu_bo_free_kernel(&ring->mqd_obj,
&ring->mqd_gpu_addr,
&ring->mqd_ptr);
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
j = i + xcc_id * adev->gfx.num_compute_rings;
kiq->pmf->kiq_unmap_queues(kiq_ring,
- &adev->gfx.compute_ring[i],
+ &adev->gfx.compute_ring[j],
RESET_QUEUES, 0, 0);
}
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
j = i + xcc_id * adev->gfx.num_gfx_rings;
kiq->pmf->kiq_unmap_queues(kiq_ring,
- &adev->gfx.gfx_ring[i],
+ &adev->gfx.gfx_ring[j],
PREEMPT_QUEUES, 0, 0);
}
}
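/*
 * Sketch of the flat ring indexing introduced above: per-XCC ring i maps
 * to global slot i + xcc_id * num_rings. The ring count below is an
 * assumed value for the demo.
 */
#include <stdio.h>

int main(void)
{
	int num_compute_rings = 8;	/* hypothetical per-XCC ring count */
	int xcc_id, i;

	for (xcc_id = 0; xcc_id < 2; xcc_id++)
		for (i = 0; i < num_compute_rings; i++)
			/* XCC 0 owns slots 0..7, XCC 1 owns 8..15, ... */
			printf("xcc %d ring %d -> compute_ring[%d]\n",
			       xcc_id, i, i + xcc_id * num_compute_rings);
	return 0;
}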
DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
kiq_ring->queue);
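+ /* presumably flushes the HDP write cache so CPU-side ring and MQD
+ * updates are visible to the GPU before the KIQ packets reference them */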
+ amdgpu_device_flush_hdp(adev, NULL);
+
spin_lock(&kiq->ring_lock);
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
adev->gfx.num_compute_rings +
kiq->pmf->set_resources_size);
if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
return -EINVAL;
+ amdgpu_device_flush_hdp(adev, NULL);
+
spin_lock(&kiq->ring_lock);
/* No need to map kgq (gfx queues) on the slave */
if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
j = i + xcc_id * adev->gfx.num_gfx_rings;
kiq->pmf->kiq_map_queues(kiq_ring,
- &adev->gfx.gfx_ring[i]);
+ &adev->gfx.gfx_ring[j]);
}
}
return 0;
}
+void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
+ void *ras_error_status,
+ void (*func)(struct amdgpu_device *adev, void *ras_error_status,
+ int xcc_id))
+{
+ int i;
+ int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
+ uint32_t xcc_mask = GENMASK(num_xcc - 1, 0);
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ if (err_data) {
+ err_data->ue_count = 0;
+ err_data->ce_count = 0;
+ }
+
+ for_each_inst(i, xcc_mask)
+ func(adev, ras_error_status, i);
+}
+
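/*
 * Hypothetical caller sketch for the helper above: an IP block's RAS
 * hook delegates its per-XCC work to amdgpu_gfx_ras_error_func, which
 * zeroes the counters once and then runs the worker on every XCC in
 * adev->gfx.xcc_mask. gfx_vX_query_ras_err_count_xcc is an assumed
 * placeholder name, not a symbol from this patch.
 */
static void gfx_vX_query_ras_error_count(struct amdgpu_device *adev,
					 void *ras_error_status)
{
	amdgpu_gfx_ras_error_func(adev, ras_error_status,
				  gfx_vX_query_ras_err_count_xcc);
}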
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
signed long r, cnt = 0;
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- enum amdgpu_gfx_partition mode;
- char *partition_mode;
-
- mode = adev->gfx.funcs->query_partition_mode(adev);
+ int mode;
- switch (mode) {
- case AMDGPU_SPX_PARTITION_MODE:
- partition_mode = "SPX";
- break;
- case AMDGPU_DPX_PARTITION_MODE:
- partition_mode = "DPX";
- break;
- case AMDGPU_TPX_PARTITION_MODE:
- partition_mode = "TPX";
- break;
- case AMDGPU_QPX_PARTITION_MODE:
- partition_mode = "QPX";
- break;
- case AMDGPU_CPX_PARTITION_MODE:
- partition_mode = "CPX";
- break;
- default:
- partition_mode = "UNKNOWN";
- break;
- }
+ mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
+ AMDGPU_XCP_FL_NONE);
- return sysfs_emit(buf, "%s\n", partition_mode);
+ return sysfs_emit(buf, "%s\n", amdgpu_gfx_compute_mode_desc(mode));
}
static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
enum amdgpu_gfx_partition mode;
- int ret;
+ int ret = 0, num_xcc;
- if (adev->gfx.num_xcd % 2 != 0)
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ if (num_xcc % 2 != 0)
return -EINVAL;
if (!strncasecmp("SPX", buf, strlen("SPX"))) {
mode = AMDGPU_SPX_PARTITION_MODE;
} else if (!strncasecmp("DPX", buf, strlen("DPX"))) {
- if (adev->gfx.num_xcd != 4 || adev->gfx.num_xcd != 8)
+ /*
+ * DPX mode needs an even number of AIDs, and each AID connects
+ * two XCCs, so num_xcc must be a multiple of 4.
+ */
+ if (num_xcc % 4)
return -EINVAL;
mode = AMDGPU_DPX_PARTITION_MODE;
} else if (!strncasecmp("TPX", buf, strlen("TPX"))) {
- if (adev->gfx.num_xcd != 6)
+ if (num_xcc != 6)
return -EINVAL;
mode = AMDGPU_TPX_PARTITION_MODE;
} else if (!strncasecmp("QPX", buf, strlen("QPX"))) {
- if (adev->gfx.num_xcd != 8)
+ if (num_xcc != 8)
return -EINVAL;
mode = AMDGPU_QPX_PARTITION_MODE;
} else if (!strncasecmp("CPX", buf, strlen("CPX"))) {
mode = AMDGPU_CPX_PARTITION_MODE;
} else {
return -EINVAL;
}
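/*
 * Standalone sketch summarizing which partition strings pass the checks
 * above for a given (even) XCC count; the counts exercised are examples.
 */
#include <stdio.h>

static void show_valid_modes(int num_xcc)
{
	printf("num_xcc=%d: SPX", num_xcc);	/* no count check for SPX */
	if (!(num_xcc % 4))
		printf(", DPX");		/* needs a multiple of 4 */
	if (num_xcc == 6)
		printf(", TPX");
	if (num_xcc == 8)
		printf(", QPX");
	printf(", CPX\n");			/* no count check for CPX */
}

int main(void)
{
	int counts[] = { 4, 6, 8 };
	unsigned int i;

	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++)
		show_valid_modes(counts[i]);
	return 0;
}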
- mutex_lock(&adev->gfx.partition_mutex);
-
- ret = adev->gfx.funcs->switch_partition_mode(adev, mode);
-
- mutex_unlock(&adev->gfx.partition_mutex);
+ ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
if (ret)
return ret;
char *supported_partition;
/* TBD */
- switch (adev->gfx.num_xcd) {
+ switch (NUM_XCC(adev->gfx.xcc_mask)) {
case 8:
supported_partition = "SPX, DPX, QPX, CPX";
break;
return r;
r = device_create_file(adev->dev, &dev_attr_available_compute_partition);
- if (r)
- return r;
- return 0;
+ return r;
+}
+
+void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
+{
+ device_remove_file(adev->dev, &dev_attr_current_compute_partition);
+ device_remove_file(adev->dev, &dev_attr_available_compute_partition);
+}
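/*
 * Example userspace interaction with the sysfs nodes created above; the
 * card path and outputs are illustrative only:
 *
 *   $ cat /sys/class/drm/card0/device/current_compute_partition
 *   SPX
 *   $ echo DPX > /sys/class/drm/card0/device/current_compute_partition
 *   $ cat /sys/class/drm/card0/device/available_compute_partition
 *   SPX, DPX, QPX, CPX
 */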