/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"

#include "soc15_common.h"
#include "amdgpu_reg_state.h"
#include "amdgpu_xcp.h"
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"

#define XCP_INST_MASK(num_inst, xcp_id)                                        \
	(num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)
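
/*
 * Worked example (illustrative values): with num_inst = 2 and xcp_id = 3,
 * GENMASK(1, 0) = 0x3 and the shift is xcp_id * num_inst = 6, so
 * XCP_INST_MASK(2, 3) = 0x3 << 6 = 0xc0, i.e. partition 3 owns
 * instances 6 and 7.
 */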

#define AMDGPU_XCP_OPS_KFD	(1 << 0)

void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
	int i;

	adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;

	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;

	adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
	adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;

	adev->doorbell_index.sdma_doorbell_range = 20;
	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->doorbell_index.sdma_engine[i] =
			AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
			i * (adev->doorbell_index.sdma_doorbell_range >> 1);

	adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;

	adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}
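
/*
 * Illustration (derived from the assignments above): with
 * sdma_doorbell_range = 20, each SDMA engine gets half of that range, so
 * engine i starts at AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START + i * 10.
 */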

static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
{
	return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
}
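
/*
 * Example (illustrative): with 8 compute partitions (CPX) but only 4 VCN
 * instances, num_xcps > num_vcn_inst, so each VCN instance is shared by
 * two partitions and the helper above returns true.
 */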

static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
				     uint32_t inst_idx, struct amdgpu_ring *ring)
{
	int xcp_id;
	enum AMDGPU_XCP_IP_BLOCK ip_blk;
	uint32_t inst_mask;

	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return;

	inst_mask = 1 << inst_idx;

	switch (ring->funcs->type) {
	case AMDGPU_HW_IP_GFX:
	case AMDGPU_RING_TYPE_COMPUTE:
	case AMDGPU_RING_TYPE_KIQ:
		ip_blk = AMDGPU_XCP_GFX;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ip_blk = AMDGPU_XCP_SDMA;
		break;
	case AMDGPU_RING_TYPE_VCN_ENC:
	case AMDGPU_RING_TYPE_VCN_JPEG:
		ip_blk = AMDGPU_XCP_VCN;
		if (aqua_vanjaram_xcp_vcn_shared(adev))
			inst_mask = 1 << (inst_idx * 2);
		break;
	default:
		DRM_ERROR("Unsupported ring type %d!", ring->funcs->type);
		return;
	}

	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
			ring->xcp_id = xcp_id;
			break;
		}
	}
}

static void aqua_vanjaram_xcp_gpu_sched_update(
		struct amdgpu_device *adev,
		struct amdgpu_ring *ring,
		unsigned int sel_xcp_id)
{
	unsigned int *num_gpu_sched;

	num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
			.gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
	adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
			.sched[(*num_gpu_sched)++] = &ring->sched;
	DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
		  sel_xcp_id, ring->funcs->type,
		  ring->hw_prio, *num_gpu_sched);
}

static int aqua_vanjaram_xcp_sched_list_update(
		struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i;

	for (i = 0; i < MAX_XCP; i++) {
		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
	}

	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		ring = adev->rings[i];
		if (!ring || !ring->sched.ready || ring->no_scheduler)
			continue;

		aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);

		/* VCN may be shared by two partitions under CPX MODE in certain
		 * configurations.
		 */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
		    aqua_vanjaram_xcp_vcn_shared(adev))
			aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
	}

	return 0;
}

static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
		else
			aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
	}

	return aqua_vanjaram_xcp_sched_list_update(adev);
}

static int aqua_vanjaram_select_scheds(
		struct amdgpu_device *adev,
		u32 hw_ip,
		u32 hw_prio,
		struct amdgpu_fpriv *fpriv,
		unsigned int *num_scheds,
		struct drm_gpu_scheduler ***scheds)
{
	u32 sel_xcp_id;
	int i;

	if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
		u32 least_ref_cnt = ~0;

		fpriv->xcp_id = 0;
		for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
			u32 total_ref_cnt;

			total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
			if (total_ref_cnt < least_ref_cnt) {
				fpriv->xcp_id = i;
				least_ref_cnt = total_ref_cnt;
			}
		}
	}
	sel_xcp_id = fpriv->xcp_id;

	if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
		*num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
		*scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
		atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
		DRM_DEBUG("Selected partition #%d", sel_xcp_id);
	} else {
		DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
		return -ENOENT;
	}

	return 0;
}

static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
						enum amd_hw_ip_block_type block,
						int8_t inst)
{
	int8_t dev_inst;

	switch (block) {
	case GC_HWIP:
	case SDMA0_HWIP:
	/* Both JPEG and VCN are mapped here, as JPEG is only an alias of VCN */
	case VCN_HWIP:
		dev_inst = adev->ip_map.dev_inst[block][inst];
		break;
	default:
		/* For the rest of the IPs, no look up is required.
		 * Assume 'logical instance == physical instance' for all configs. */
		dev_inst = inst;
		break;
	}

	return dev_inst;
}

static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
						  enum amd_hw_ip_block_type block,
						  uint32_t mask)
{
	uint32_t dev_mask = 0;
	int8_t log_inst, dev_inst;

	while (mask) {
		log_inst = ffs(mask) - 1;
		dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
		dev_mask |= (1 << dev_inst);
		mask &= ~(1 << log_inst);
	}

	return dev_mask;
}

static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
					  enum amd_hw_ip_block_type ip_block,
					  uint32_t inst_mask)
{
	int l = 0, i;

	while (inst_mask) {
		i = ffs(inst_mask) - 1;
		adev->ip_map.dev_inst[ip_block][l++] = i;
		inst_mask &= ~(1 << i);
	}
	for (; l < HWIP_MAX_INSTANCE; l++)
		adev->ip_map.dev_inst[ip_block][l] = -1;
}

void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
{
	u32 ip_map[][2] = {
		{ GC_HWIP, adev->gfx.xcc_mask },
		{ SDMA0_HWIP, adev->sdma.sdma_mask },
		{ VCN_HWIP, adev->vcn.inst_mask },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
		aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);

	adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
	adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
}

/* Fixed pattern for smn addressing on different AIDs:
 *   bit[34]: indicates cross AID access
 *   bit[33:32]: indicates the target AID id
 * The AID id range is 0 ~ 3, as the maximum AID number is 4.
 */
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
{
	u64 ext_offset;

	/* local routing; bit[34:32] will be zeros */
	if (ext_id == 0)
		return 0;

	/* Initiated from host, accesses to all non-zero AIDs are cross traffic */
	ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);

	return ext_offset;
}
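
/*
 * Worked example (illustrative): ext_id = 0 is local routing and encodes to 0;
 * ext_id = 2 yields (1ULL << 34) | ((u64)2 << 32) = 0x600000000, i.e. the
 * cross-AID bit plus target AID 2 in bits [33:32].
 */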

static enum amdgpu_gfx_partition
__aqua_vanjaram_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xcc_per_xcp = 0, mode = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
	if (adev->gfx.funcs->get_xccs_per_xcp)
		num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
	if ((num_xcc_per_xcp) && (num_xcc % num_xcc_per_xcp == 0))
		mode = num_xcc / num_xcc_per_xcp;

	if (num_xcc_per_xcp == 1)
		return AMDGPU_CPX_PARTITION_MODE;

	switch (mode) {
	case 1:
		return AMDGPU_SPX_PARTITION_MODE;
	case 2:
		return AMDGPU_DPX_PARTITION_MODE;
	case 3:
		return AMDGPU_TPX_PARTITION_MODE;
	case 4:
		return AMDGPU_QPX_PARTITION_MODE;
	default:
		return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	}
}
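
/*
 * Example (illustrative): on a part with 8 XCCs, get_xccs_per_xcp() == 4
 * gives mode = 8 / 4 = 2, i.e. DPX; get_xccs_per_xcp() == 1 returns CPX
 * directly.
 */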

static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	enum amdgpu_gfx_partition derv_mode,
		mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	struct amdgpu_device *adev = xcp_mgr->adev;

	derv_mode = __aqua_vanjaram_calc_xcp_mode(xcp_mgr);

	if (amdgpu_sriov_vf(adev))
		return derv_mode;

	if (adev->nbio.funcs->get_compute_partition_mode) {
		mode = adev->nbio.funcs->get_compute_partition_mode(adev);
		if (mode != derv_mode)
			dev_warn(adev->dev,
				 "Mismatch in compute partition mode - reported : %d derived : %d",
				 mode, derv_mode);
	}

	return mode;
}

static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	int num_xcc, num_xcc_per_xcp = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc;
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 2;
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 3;
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 4;
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcc_per_xcp = 1;
		break;
	default:
		break;
	}

	return num_xcc_per_xcp;
}

static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
					   enum AMDGPU_XCP_IP_BLOCK ip_id,
					   struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;
	int num_sdma, num_vcn;

	num_sdma = adev->sdma.num_instances;
	num_vcn = adev->vcn.num_vcn_inst;

	switch (xcp_mgr->mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_sdma_xcp = num_sdma;
		num_vcn_xcp = num_vcn;
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_sdma_xcp = num_sdma / 2;
		num_vcn_xcp = num_vcn / 2;
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_sdma_xcp = num_sdma / 3;
		num_vcn_xcp = num_vcn / 3;
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_sdma_xcp = num_sdma / 4;
		num_vcn_xcp = num_vcn / 4;
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_sdma_xcp = 2;
		num_vcn_xcp = num_vcn ? 1 : 0;
		break;
	default:
		return -EINVAL;
	}

	num_xcc_xcp = adev->gfx.num_xcc_per_xcp;

	switch (ip_id) {
	case AMDGPU_XCP_GFXHUB:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
		break;
	case AMDGPU_XCP_GFX:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
		break;
	case AMDGPU_XCP_SDMA:
		ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
		ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
		break;
	case AMDGPU_XCP_VCN:
		ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id);
		/* TODO : Assign IP funcs */
		break;
	default:
		return -EINVAL;
	}

	ip->ip_id = ip_id;

	return 0;
}

static enum amdgpu_gfx_partition
__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	if (adev->gmc.num_mem_partitions == 1)
		return AMDGPU_SPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc)
		return AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc / 2)
		return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
						    AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
		return AMDGPU_DPX_PARTITION_MODE;

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					  enum amdgpu_gfx_partition mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xccs_per_xcp;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
	case AMDGPU_DPX_PARTITION_MODE:
		return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0;
	case AMDGPU_TPX_PARTITION_MODE:
		return (adev->gmc.num_mem_partitions == 1 ||
			adev->gmc.num_mem_partitions == 3) &&
		       ((num_xcc % 3) == 0);
	case AMDGPU_QPX_PARTITION_MODE:
		num_xccs_per_xcp = num_xcc / 4;
		return (adev->gmc.num_mem_partitions == 1 ||
			adev->gmc.num_mem_partitions == 4) &&
		       (num_xccs_per_xcp >= 2);
	case AMDGPU_CPX_PARTITION_MODE:
		return ((num_xcc > 1) &&
			(adev->gmc.num_mem_partitions == 1 ||
			 adev->gmc.num_mem_partitions == 4) &&
			(num_xcc % adev->gmc.num_mem_partitions) == 0);
	default:
		return false;
	}
}

static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	/* TODO:
	 * Stop user queues and threads, and make sure GPU is empty of work.
	 */

	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);

	return 0;
}

static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int ret = 0;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		amdgpu_amdkfd_device_probe(xcp_mgr->adev);
		amdgpu_amdkfd_device_init(xcp_mgr->adev);
		/* If KFD init failed, return failure */
		if (!xcp_mgr->adev->kfd.init_complete)
			ret = -EIO;
	}

	return ret;
}

static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					       int mode, int *num_xcps)
{
	int num_xcc_per_xcp, num_xcc, ret;
	struct amdgpu_device *adev;
	u32 flags = 0;

	adev = xcp_mgr->adev;
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
		mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
			dev_err(adev->dev,
				"Invalid config, no compatible compute partition mode found, available memory partitions: %d",
				adev->gmc.num_mem_partitions);
			return -EINVAL;
		}
	} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
		dev_err(adev->dev,
			"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
			amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
		return -EINVAL;
	}

	if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
		flags |= AMDGPU_XCP_OPS_KFD;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
		if (ret)
			goto out;
	}

	ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
	if (ret)
		goto unlock;

	num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
	if (adev->gfx.funcs->switch_partition_mode)
		adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
						       num_xcc_per_xcp);

	/* Init info about new xcps */
	*num_xcps = num_xcc / num_xcc_per_xcp;
	amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);

	ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
unlock:
	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_unlock_kfd(adev);
out:
	return ret;
}

static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
					  int xcc_id, uint8_t *mem_id)
{
	/* memory/spatial modes validation check is already done */
	*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
	*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;

	return 0;
}
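
/*
 * Worked example (illustrative): in CPX with 8 XCCs (num_xcc_per_xcp = 1),
 * 8 XCPs and 4 memory partitions (num_xcp_per_mem_partition = 2), xcc_id 5
 * belongs to XCP 5 and therefore mem_id = 5 / 2 = 2.
 */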

static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
					struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
	struct amdgpu_numa_info numa_info;
	struct amdgpu_device *adev;
	uint32_t xcc_mask;
	int r, i, xcc_id;

	adev = xcp_mgr->adev;
	/* TODO: BIOS is not returning the right info now
	 * Check on this later
	 */
	/*
	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	*/
	if (adev->gmc.num_mem_partitions == 1) {
		/* Only one range */
		*mem_id = 0;
		return 0;
	}

	r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
	if (r)
		return r;

	xcc_id = ffs(xcc_mask) - 1;
	if (!adev->gmc.is_app_apu)
		return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);

	r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
	if (r)
		return r;

	r = -EINVAL;
	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
		if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
			*mem_id = i;
			r = 0;
			break;
		}
	}

	return r;
}

static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
					    enum AMDGPU_XCP_IP_BLOCK ip_id,
					    struct amdgpu_xcp_ip *ip)
{
	if (!ip)
		return -EINVAL;

	return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
}

struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
	.switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
	.query_partition_mode = &aqua_vanjaram_query_partition_mode,
	.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
	.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
	.select_scheds = &aqua_vanjaram_select_scheds,
	.update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list,
};

static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
{
	int ret;

	if (amdgpu_sriov_vf(adev))
		aqua_vanjaram_xcp_funcs.switch_partition_mode = NULL;

	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
				  &aqua_vanjaram_xcp_funcs);
	if (ret)
		return ret;

	/* TODO: Default memory node affinity init */

	return ret;
}

int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
	u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask;
	int ret, i;

	/* generally 1 AID supports 4 instances */
	adev->sdma.num_inst_per_aid = 4;
	adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);

	adev->aid_mask = i = 1;
	inst_mask >>= adev->sdma.num_inst_per_aid;

	for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
	     inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
		avail_inst = inst_mask & mask;
		if (avail_inst == mask || avail_inst == 0x3 ||
		    avail_inst == 0xc)
			adev->aid_mask |= (1 << i);
	}

	/* Harvest config is not used for aqua vanjaram. VCN and JPEG instances
	 * are addressed based on logical instance ids.
	 */
	adev->vcn.harvest_config = 0;
	adev->vcn.num_inst_per_aid = 1;
	adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
	adev->jpeg.harvest_config = 0;
	adev->jpeg.num_inst_per_aid = 1;
	adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);

	ret = aqua_vanjaram_xcp_mgr_init(adev);
	if (ret)
		return ret;

	aqua_vanjaram_ip_map_init(adev);

	return 0;
}

static void aqua_read_smn(struct amdgpu_device *adev,
			  struct amdgpu_smn_reg_data *regdata,
			  uint64_t smn_addr)
{
	regdata->addr = smn_addr;
	regdata->value = RREG32_PCIE(smn_addr);
}

struct aqua_reg_list {
	uint64_t start_addr;
	uint32_t num_regs;
	uint32_t incrx;
};

#define DW_ADDR_INCR  4
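
/*
 * Example (illustrative): an entry such as { smnreg_0x1A340294, 6, DW_ADDR_INCR }
 * below asks the dump code to read 6 consecutive dwords, starting at
 * 0x1A340294 and stepping the SMN address by 4 bytes per register.
 */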

static void aqua_read_smn_ext(struct amdgpu_device *adev,
			      struct amdgpu_smn_reg_data *regdata,
			      uint64_t smn_addr, int i)
{
	regdata->addr =
		smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
	regdata->value = RREG32_PCIE_EXT(regdata->addr);
}

#define smnreg_0x1A340218	0x1A340218
#define smnreg_0x1A3402E4	0x1A3402E4
#define smnreg_0x1A340294	0x1A340294
#define smnreg_0x1A380088	0x1A380088

#define NUM_PCIE_SMN_REGS	14

static struct aqua_reg_list pcie_reg_addrs[] = {
	{ smnreg_0x1A340218, 1, 0 },
	{ smnreg_0x1A3402E4, 1, 0 },
	{ smnreg_0x1A340294, 6, DW_ADDR_INCR },
	{ smnreg_0x1A380088, 6, DW_ADDR_INCR },
};

static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_pcie_v1_0 *pcie_regs;
	struct amdgpu_smn_reg_data *reg_data;
	struct pci_dev *us_pdev, *ds_pdev;
	int aer_cap, r, n;

	if (!buf || !max_size)
		return -EINVAL;

	pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf;

	szbuf = sizeof(*pcie_reg_state) +
		amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS);
	/* Only one instance of pcie regs */
	if (max_size < szbuf)
		return -EOVERFLOW;

	pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf +
						     sizeof(*pcie_reg_state));
	pcie_regs->inst_header.instance = 0;
	pcie_regs->inst_header.state = AMDGPU_INST_S_OK;
	pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS;

	reg_data = pcie_regs->smn_reg_values;

	for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) {
		start_addr = pcie_reg_addrs[r].start_addr;
		incrx = pcie_reg_addrs[r].incrx;
		num_regs = pcie_reg_addrs[r].num_regs;
		for (n = 0; n < num_regs; n++) {
			aqua_read_smn(adev, reg_data, start_addr + n * incrx);
			reg_data++;
		}
	}

	ds_pdev = pci_upstream_bridge(adev->pdev);
	us_pdev = pci_upstream_bridge(ds_pdev);

	pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA,
				  &pcie_regs->device_status);
	pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA,
				  &pcie_regs->link_status);

	aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR);
	if (aer_cap) {
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS,
				      &pcie_regs->pcie_corr_err_status);
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS,
				      &pcie_regs->pcie_uncorr_err_status);
	}

	pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS,
			      &pcie_regs->sub_bus_number_latency);

	pcie_reg_state->common_header.structure_size = szbuf;
	pcie_reg_state->common_header.format_revision = 1;
	pcie_reg_state->common_header.content_revision = 0;
	pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE;
	pcie_reg_state->common_header.num_instances = 1;

	return pcie_reg_state->common_header.structure_size;
}

#define smnreg_0x11A00050	0x11A00050
#define smnreg_0x11A00180	0x11A00180
#define smnreg_0x11A00070	0x11A00070
#define smnreg_0x11A00200	0x11A00200
#define smnreg_0x11A0020C	0x11A0020C
#define smnreg_0x11A00210	0x11A00210
#define smnreg_0x11A00108	0x11A00108

#define XGMI_LINK_REG(smnreg, l) ((smnreg) | (l << 20))
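
/*
 * Example (illustrative): XGMI_LINK_REG(smnreg_0x11A00070, 1) shifts the
 * link index up by 20 bits, giving 0x11A00070 | (1 << 20) = 0x11B00070,
 * i.e. the same register on link 1.
 */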

#define NUM_XGMI_SMN_REGS	25

static struct aqua_reg_list xgmi_reg_addrs[] = {
	{ smnreg_0x11A00050, 1, 0 },
	{ smnreg_0x11A00180, 16, DW_ADDR_INCR },
	{ smnreg_0x11A00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11A00200, 1, 0 },
	{ smnreg_0x11A0020C, 1, 0 },
	{ smnreg_0x11A00210, 1, 0 },
	{ smnreg_0x11A00108, 1, 0 },
};

static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_xgmi_instances = 8;
	int inst = 0, i, j, r, n;
	const int xgmi_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;

	szbuf = sizeof(*xgmi_reg_state) +
		amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
				    NUM_XGMI_SMN_REGS);
	/* Make sure the buffer can hold state for all xgmi instances */
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &xgmi_reg_state->xgmi_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < xgmi_inst; ++j) {
			xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
			xgmi_regs->inst_header.instance = inst++;

			xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
			xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;

			reg_data = xgmi_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
				start_addr = xgmi_reg_addrs[r].start_addr;
				incrx = xgmi_reg_addrs[r].incrx;
				num_regs = xgmi_reg_addrs[r].num_regs;

				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(adev, reg_data,
							  XGMI_LINK_REG(start_addr, j) +
							  n * incrx, i);
					reg_data++;
				}
			}
			p = reg_data;
		}
	}

	xgmi_reg_state->common_header.structure_size = szbuf;
	xgmi_reg_state->common_header.format_revision = 1;
	xgmi_reg_state->common_header.content_revision = 0;
	xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
	xgmi_reg_state->common_header.num_instances = max_xgmi_instances;

	return xgmi_reg_state->common_header.structure_size;
}

#define smnreg_0x11C00070	0x11C00070
#define smnreg_0x11C00210	0x11C00210

static struct aqua_reg_list wafl_reg_addrs[] = {
	{ smnreg_0x11C00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11C00210, 1, 0 },
};

#define WAFL_LINK_REG(smnreg, l) ((smnreg) | (l << 20))

#define NUM_WAFL_SMN_REGS	5

static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_wafl_v1_0 *wafl_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_wafl_instances = 8;
	int inst = 0, i, j, r, n;
	const int wafl_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;

	szbuf = sizeof(*wafl_reg_state) +
		amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
				    NUM_WAFL_SMN_REGS);

	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &wafl_reg_state->wafl_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < wafl_inst; ++j) {
			wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
			wafl_regs->inst_header.instance = inst++;

			wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
			wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;

			reg_data = wafl_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
				start_addr = wafl_reg_addrs[r].start_addr;
				incrx = wafl_reg_addrs[r].incrx;
				num_regs = wafl_reg_addrs[r].num_regs;
				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(adev, reg_data,
							  WAFL_LINK_REG(start_addr, j) +
							  n * incrx, i);
					reg_data++;
				}
			}
			p = reg_data;
		}
	}

	wafl_reg_state->common_header.structure_size = szbuf;
	wafl_reg_state->common_header.format_revision = 1;
	wafl_reg_state->common_header.content_revision = 0;
	wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
	wafl_reg_state->common_header.num_instances = max_wafl_instances;

	return wafl_reg_state->common_header.structure_size;
}

#define smnreg_0x1B311060	0x1B311060
#define smnreg_0x1B411060	0x1B411060
#define smnreg_0x1B511060	0x1B511060
#define smnreg_0x1B611060	0x1B611060

#define smnreg_0x1C307120	0x1C307120
#define smnreg_0x1C317120	0x1C317120

#define smnreg_0x1C320830	0x1C320830
#define smnreg_0x1C380830	0x1C380830
#define smnreg_0x1C3D0830	0x1C3D0830
#define smnreg_0x1C420830	0x1C420830

#define smnreg_0x1C320100	0x1C320100
#define smnreg_0x1C380100	0x1C380100
#define smnreg_0x1C3D0100	0x1C3D0100
#define smnreg_0x1C420100	0x1C420100

#define smnreg_0x1B310500	0x1B310500
#define smnreg_0x1C300400	0x1C300400

#define USR_CAKE_INCR	0x11000
#define USR_LINK_INCR	0x100000
#define USR_CP_INCR	0x10000

#define NUM_USR_SMN_REGS	20

static struct aqua_reg_list usr_reg_addrs[] = {
	{ smnreg_0x1B311060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B411060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B511060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B611060, 4, DW_ADDR_INCR },
	{ smnreg_0x1C307120, 2, DW_ADDR_INCR },
	{ smnreg_0x1C317120, 2, DW_ADDR_INCR },
};

#define NUM_USR1_SMN_REGS	46

static struct aqua_reg_list usr1_reg_addrs[] = {
	{ smnreg_0x1C320830, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420830, 4, USR_CAKE_INCR },
	{ smnreg_0x1C320100, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420100, 4, USR_CAKE_INCR },
	{ smnreg_0x1B310500, 4, USR_LINK_INCR },
	{ smnreg_0x1C300400, 2, USR_CP_INCR },
};

static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
					    void *buf, size_t max_size,
					    enum amdgpu_reg_state reg_state)
{
	uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
	struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
	struct amdgpu_regs_usr_v1_0 *usr_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_usr_instances = 4;
	struct aqua_reg_list *reg_addrs;
	int inst = 0, i, n, r, arr_size;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_USR:
		arr_size = ARRAY_SIZE(usr_reg_addrs);
		reg_addrs = usr_reg_addrs;
		num_smn = NUM_USR_SMN_REGS;
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		arr_size = ARRAY_SIZE(usr1_reg_addrs);
		reg_addrs = usr1_reg_addrs;
		num_smn = NUM_USR1_SMN_REGS;
		break;
	default:
		return -EINVAL;
	}

	usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;

	szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
							     sizeof(*usr_regs), num_smn);

	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &usr_reg_state->usr_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
		usr_regs->inst_header.instance = inst++;
		usr_regs->inst_header.state = AMDGPU_INST_S_OK;
		usr_regs->inst_header.num_smn_regs = num_smn;
		reg_data = usr_regs->smn_reg_values;

		for (r = 0; r < arr_size; r++) {
			start_addr = reg_addrs[r].start_addr;
			incrx = reg_addrs[r].incrx;
			num_regs = reg_addrs[r].num_regs;
			for (n = 0; n < num_regs; n++) {
				aqua_read_smn_ext(adev, reg_data,
						  start_addr + n * incrx, i);
				reg_data++;
			}
		}
		p = reg_data;
	}

	usr_reg_state->common_header.structure_size = szbuf;
	usr_reg_state->common_header.format_revision = 1;
	usr_reg_state->common_header.content_revision = 0;
	usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR;
	usr_reg_state->common_header.num_instances = max_usr_instances;

	return usr_reg_state->common_header.structure_size;
}

ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
				    enum amdgpu_reg_state reg_state, void *buf,
				    size_t max_size)
{
	ssize_t size;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_PCIE:
		size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_XGMI:
		size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_WAFL:
		size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_USR:
		size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
						    AMDGPU_REG_STATE_TYPE_USR);
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
						    AMDGPU_REG_STATE_TYPE_USR_1);
		break;
	default:
		return -EINVAL;
	}

	return size;
}