/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"

#include "soc15_common.h"
#include "amdgpu_reg_state.h"
#include "amdgpu_xcp.h"
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"

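/*
 * XCP_INST_MASK() builds the contiguous instance mask owned by one
 * partition: e.g. num_inst = 2, xcp_id = 1 yields GENMASK(1, 0) << 2 = 0xc,
 * i.e. instances 2 and 3; num_inst = 0 yields an empty mask.
 */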
#define XCP_INST_MASK(num_inst, xcp_id)                                        \
        (num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)

#define AMDGPU_XCP_OPS_KFD      (1 << 0)

void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
        int i;

        adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;

        adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;

        adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
        adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
        adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;

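        /*
         * Each SDMA engine takes half of the 20-slot doorbell range, so
         * consecutive engines start 10 doorbell slots apart.
         */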
        adev->doorbell_index.sdma_doorbell_range = 20;
        for (i = 0; i < adev->sdma.num_instances; i++)
                adev->doorbell_index.sdma_engine[i] =
                        AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
                        i * (adev->doorbell_index.sdma_doorbell_range >> 1);

        adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
        adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;

        adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
        adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;

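        /*
         * The LAYOUT1 indices count 64-bit doorbell slots; shifting left by
         * one converts the maximum assignment into the 32-bit dword index
         * used elsewhere (assumption based on the usual amdgpu doorbell
         * convention).
         */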
        adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}

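/*
 * VCN instances are shared between partitions when there are more
 * partitions than VCN instances (e.g. some CPX configs); each VCN then
 * serves two consecutive partitions.
 */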
static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
{
        return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
}

static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
                             uint32_t inst_idx, struct amdgpu_ring *ring)
{
        int xcp_id;
        enum AMDGPU_XCP_IP_BLOCK ip_blk;
        uint32_t inst_mask;

        ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
        if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
                return;

        inst_mask = 1 << inst_idx;

        switch (ring->funcs->type) {
        case AMDGPU_RING_TYPE_GFX:
        case AMDGPU_RING_TYPE_COMPUTE:
        case AMDGPU_RING_TYPE_KIQ:
                ip_blk = AMDGPU_XCP_GFX;
                break;
        case AMDGPU_RING_TYPE_SDMA:
                ip_blk = AMDGPU_XCP_SDMA;
                break;
        case AMDGPU_RING_TYPE_VCN_ENC:
        case AMDGPU_RING_TYPE_VCN_JPEG:
                ip_blk = AMDGPU_XCP_VCN;
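                /*
                 * A shared VCN instance serves two consecutive partitions,
                 * so logical instance i sits at partition slot 2 * i; the
                 * partner partition is attached in
                 * aqua_vanjaram_xcp_sched_list_update().
                 */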
                if (aqua_vanjaram_xcp_vcn_shared(adev))
                        inst_mask = 1 << (inst_idx * 2);
                break;
        default:
                DRM_ERROR("Unsupported ring type %d!", ring->funcs->type);
                return;
        }

        for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
                if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
                        ring->xcp_id = xcp_id;
                        break;
                }
        }
}

static void aqua_vanjaram_xcp_gpu_sched_update(
                struct amdgpu_device *adev,
                struct amdgpu_ring *ring,
                unsigned int sel_xcp_id)
{
        unsigned int *num_gpu_sched;

        num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
                        .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
        adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
                        .sched[(*num_gpu_sched)++] = &ring->sched;
        DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
                        sel_xcp_id, ring->funcs->type,
                        ring->hw_prio, *num_gpu_sched);
}

static int aqua_vanjaram_xcp_sched_list_update(
                struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        int i;

        for (i = 0; i < MAX_XCP; i++) {
                atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
                memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
        }

        if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
                return 0;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                ring = adev->rings[i];
                if (!ring || !ring->sched.ready || ring->no_scheduler)
                        continue;

                aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);

                /* VCN may be shared by two partitions under CPX MODE in certain
                 * configs.
                 */
                if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
                     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
                    aqua_vanjaram_xcp_vcn_shared(adev))
                        aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
        }

        return 0;
}

static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->num_rings; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
                        ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
                        aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
                else
                        aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
        }

        return aqua_vanjaram_xcp_sched_list_update(adev);
}

static int aqua_vanjaram_select_scheds(
                struct amdgpu_device *adev,
                u32 hw_ip,
                u32 hw_prio,
                struct amdgpu_fpriv *fpriv,
                unsigned int *num_scheds,
                struct drm_gpu_scheduler ***scheds)
{
        u32 sel_xcp_id;
        int i;

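        /*
         * A context with no partition assigned yet is placed on the
         * least-referenced partition, balancing new work across XCPs.
         */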
        if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
                u32 least_ref_cnt = ~0;

                fpriv->xcp_id = 0;
                for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
                        u32 total_ref_cnt;

                        total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
                        if (total_ref_cnt < least_ref_cnt) {
                                fpriv->xcp_id = i;
                                least_ref_cnt = total_ref_cnt;
                        }
                }
        }
        sel_xcp_id = fpriv->xcp_id;

        if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
                *num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
                *scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
                atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
                DRM_DEBUG("Selected partition #%d", sel_xcp_id);
        } else {
                DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
                return -ENOENT;
        }

        return 0;
}

static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
                                         enum amd_hw_ip_block_type block,
                                         int8_t inst)
{
        int8_t dev_inst;

        switch (block) {
        case GC_HWIP:
        case SDMA0_HWIP:
        /* JPEG uses the same map as VCN, since JPEG is only an alias of VCN */
        case VCN_HWIP:
                dev_inst = adev->ip_map.dev_inst[block][inst];
                break;
        default:
                /* For the rest of the IPs, no lookup is required.
                 * Assume 'logical instance == physical instance' for all configs. */
                dev_inst = inst;
                break;
        }

        return dev_inst;
}

static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
                                         enum amd_hw_ip_block_type block,
                                         uint32_t mask)
{
        uint32_t dev_mask = 0;
        int8_t log_inst, dev_inst;

        while (mask) {
                log_inst = ffs(mask) - 1;
                dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
                dev_mask |= (1 << dev_inst);
                mask &= ~(1 << log_inst);
        }

        return dev_mask;
}

static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
                                          enum amd_hw_ip_block_type ip_block,
                                          uint32_t inst_mask)
{
        int l = 0, i;

        while (inst_mask) {
                i = ffs(inst_mask) - 1;
                adev->ip_map.dev_inst[ip_block][l++] = i;
                inst_mask &= ~(1 << i);
        }
        for (; l < HWIP_MAX_INSTANCE; l++)
                adev->ip_map.dev_inst[ip_block][l] = -1;
}

void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
{
        u32 ip_map[][2] = {
                { GC_HWIP, adev->gfx.xcc_mask },
                { SDMA0_HWIP, adev->sdma.sdma_mask },
                { VCN_HWIP, adev->vcn.inst_mask },
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
                aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);

        adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
        adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
}

/* Fixed pattern for SMN addressing on different AIDs:
 *   bit[34]: indicates a cross-AID access
 *   bit[33:32]: indicate the target AID id
 * The AID id range is 0 ~ 3, as the maximum AID number is 4.
 * Example: ext_id = 2 encodes to 0x600000000 (bit 34 set, AID 2 in bits [33:32]).
 */
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
{
        u64 ext_offset;

        /* local routing and bit[34:32] will be zeros */
        if (ext_id == 0)
                return 0;

        /* Initiated from the host; accesses to all non-zero AIDs are cross traffic */
        ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);

        return ext_offset;
}

static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
        enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
        struct amdgpu_device *adev = xcp_mgr->adev;

        if (adev->nbio.funcs->get_compute_partition_mode)
                mode = adev->nbio.funcs->get_compute_partition_mode(adev);

        return mode;
}

static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
        int num_xcc, num_xcc_per_xcp = 0;

        num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

        switch (mode) {
        case AMDGPU_SPX_PARTITION_MODE:
                num_xcc_per_xcp = num_xcc;
                break;
        case AMDGPU_DPX_PARTITION_MODE:
                num_xcc_per_xcp = num_xcc / 2;
                break;
        case AMDGPU_TPX_PARTITION_MODE:
                num_xcc_per_xcp = num_xcc / 3;
                break;
        case AMDGPU_QPX_PARTITION_MODE:
                num_xcc_per_xcp = num_xcc / 4;
                break;
        case AMDGPU_CPX_PARTITION_MODE:
                num_xcc_per_xcp = 1;
                break;
        }

        return num_xcc_per_xcp;
}

static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
                                    enum AMDGPU_XCP_IP_BLOCK ip_id,
                                    struct amdgpu_xcp_ip *ip)
{
        struct amdgpu_device *adev = xcp_mgr->adev;
        int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;
        int num_sdma, num_vcn;

        num_sdma = adev->sdma.num_instances;
        num_vcn = adev->vcn.num_vcn_inst;

        switch (xcp_mgr->mode) {
        case AMDGPU_SPX_PARTITION_MODE:
                num_sdma_xcp = num_sdma;
                num_vcn_xcp = num_vcn;
                break;
        case AMDGPU_DPX_PARTITION_MODE:
                num_sdma_xcp = num_sdma / 2;
                num_vcn_xcp = num_vcn / 2;
                break;
        case AMDGPU_TPX_PARTITION_MODE:
                num_sdma_xcp = num_sdma / 3;
                num_vcn_xcp = num_vcn / 3;
                break;
        case AMDGPU_QPX_PARTITION_MODE:
                num_sdma_xcp = num_sdma / 4;
                num_vcn_xcp = num_vcn / 4;
                break;
        case AMDGPU_CPX_PARTITION_MODE:
                num_sdma_xcp = 2;
                num_vcn_xcp = num_vcn ? 1 : 0;
                break;
        default:
                return -EINVAL;
        }

        num_xcc_xcp = adev->gfx.num_xcc_per_xcp;

        switch (ip_id) {
        case AMDGPU_XCP_GFXHUB:
                ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
                ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
                break;
        case AMDGPU_XCP_GFX:
                ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
                ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
                break;
        case AMDGPU_XCP_SDMA:
                ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
                ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
                break;
        case AMDGPU_XCP_VCN:
                ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id);
                /* TODO: Assign IP funcs */
                break;
        default:
                return -EINVAL;
        }

        ip->ip_id = ip_id;

        return 0;
}

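/*
 * Auto mode picks the compute partition layout that matches the current
 * number of memory partitions: one memory partition maps to SPX, one per
 * XCC maps to CPX, and the half-XCC case depends on whether the part is
 * an APU.
 */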
static enum amdgpu_gfx_partition
__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
        struct amdgpu_device *adev = xcp_mgr->adev;
        int num_xcc;

        num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

        if (adev->gmc.num_mem_partitions == 1)
                return AMDGPU_SPX_PARTITION_MODE;

        if (adev->gmc.num_mem_partitions == num_xcc)
                return AMDGPU_CPX_PARTITION_MODE;

        if (adev->gmc.num_mem_partitions == num_xcc / 2)
                return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
                                                    AMDGPU_QPX_PARTITION_MODE;

        if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
                return AMDGPU_DPX_PARTITION_MODE;

        return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
                                          enum amdgpu_gfx_partition mode)
{
        struct amdgpu_device *adev = xcp_mgr->adev;
        int num_xcc, num_xccs_per_xcp;

        num_xcc = NUM_XCC(adev->gfx.xcc_mask);
        switch (mode) {
        case AMDGPU_SPX_PARTITION_MODE:
                return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
        case AMDGPU_DPX_PARTITION_MODE:
                return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0;
        case AMDGPU_TPX_PARTITION_MODE:
                return (adev->gmc.num_mem_partitions == 1 ||
                        adev->gmc.num_mem_partitions == 3) &&
                       ((num_xcc % 3) == 0);
        case AMDGPU_QPX_PARTITION_MODE:
                num_xccs_per_xcp = num_xcc / 4;
                return (adev->gmc.num_mem_partitions == 1 ||
                        adev->gmc.num_mem_partitions == 4) &&
                       (num_xccs_per_xcp >= 2);
        case AMDGPU_CPX_PARTITION_MODE:
                return ((num_xcc > 1) &&
                       (adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 4) &&
                       (num_xcc % adev->gmc.num_mem_partitions) == 0);
        default:
                return false;
        }

        return false;
}

static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
        /* TODO:
         * Stop user queues and threads, and make sure GPU is empty of work.
         */

        if (flags & AMDGPU_XCP_OPS_KFD)
                amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);

        return 0;
}

static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
        int ret = 0;

        if (flags & AMDGPU_XCP_OPS_KFD) {
                amdgpu_amdkfd_device_probe(xcp_mgr->adev);
                amdgpu_amdkfd_device_init(xcp_mgr->adev);
                /* If KFD init failed, return failure */
                if (!xcp_mgr->adev->kfd.init_complete)
                        ret = -EIO;
        }

        return ret;
}

static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
                                               int mode, int *num_xcps)
{
        int num_xcc_per_xcp, num_xcc, ret;
        struct amdgpu_device *adev;
        u32 flags = 0;

        adev = xcp_mgr->adev;
        num_xcc = NUM_XCC(adev->gfx.xcc_mask);

        if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
                mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
        } else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
                dev_err(adev->dev,
                        "Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
                        amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
                return -EINVAL;
        }

        if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
                flags |= AMDGPU_XCP_OPS_KFD;

        if (flags & AMDGPU_XCP_OPS_KFD) {
                ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
                if (ret)
                        goto out;
        }

        ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
        if (ret)
                goto unlock;

        num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
        if (adev->gfx.funcs->switch_partition_mode)
                adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
                                                       num_xcc_per_xcp);

        /* Init info about new xcps */
        *num_xcps = num_xcc / num_xcc_per_xcp;
        amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);

        ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
unlock:
        if (flags & AMDGPU_XCP_OPS_KFD)
                amdgpu_amdkfd_unlock_kfd(adev);
out:
        return ret;
}

static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
                                          int xcc_id, uint8_t *mem_id)
{
        /* memory/spatial modes validation check is already done */
        *mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
        *mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;

        return 0;
}

static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
                                        struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
        struct amdgpu_numa_info numa_info;
        struct amdgpu_device *adev;
        uint32_t xcc_mask;
        int r, i, xcc_id;

        adev = xcp_mgr->adev;
        /* TODO: BIOS is not returning the right info now
         * Check on this later
         */
        /*
        if (adev->gmc.gmc_funcs->query_mem_partition_mode)
                mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
        */
        if (adev->gmc.num_mem_partitions == 1) {
                /* Only one range */
                *mem_id = 0;
                return 0;
        }

        r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
        if (r || !xcc_mask)
                return -EINVAL;

        xcc_id = ffs(xcc_mask) - 1;
        if (!adev->gmc.is_app_apu)
                return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);

        r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);

        if (r)
                return r;

        r = -EINVAL;
        for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
                if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
                        *mem_id = i;
                        r = 0;
                        break;
                }
        }

        return r;
}

static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
                                     enum AMDGPU_XCP_IP_BLOCK ip_id,
                                     struct amdgpu_xcp_ip *ip)
{
        if (!ip)
                return -EINVAL;

        return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
}

struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
        .switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
        .query_partition_mode = &aqua_vanjaram_query_partition_mode,
        .get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
        .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
        .select_scheds = &aqua_vanjaram_select_scheds,
        .update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list
};

static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
{
        int ret;

        ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
                                  &aqua_vanjaram_xcp_funcs);
        if (ret)
                return ret;

        /* TODO: Default memory node affinity init */

        return ret;
}

int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
        u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask;
        int ret, i;

        /* Generally, one AID supports four SDMA instances */
        adev->sdma.num_inst_per_aid = 4;
        adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);

        adev->aid_mask = i = 1;
        inst_mask >>= adev->sdma.num_inst_per_aid;

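        /*
         * AID 0 is always present. Each further AID is marked present if
         * all four of its SDMA instances are available, or if either the
         * lower (0x3) or upper (0xc) pair of instances is available.
         */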
        for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
             inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
                avail_inst = inst_mask & mask;
                if (avail_inst == mask || avail_inst == 0x3 ||
                    avail_inst == 0xc)
                        adev->aid_mask |= (1 << i);
        }

        /* Harvest config is not used for aqua vanjaram. VCN and JPEGs will be
         * addressed based on logical instance ids.
         */
        adev->vcn.harvest_config = 0;
        adev->vcn.num_inst_per_aid = 1;
        adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
        adev->jpeg.harvest_config = 0;
        adev->jpeg.num_inst_per_aid = 1;
        adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);

        ret = aqua_vanjaram_xcp_mgr_init(adev);
        if (ret)
                return ret;

        aqua_vanjaram_ip_map_init(adev);

        return 0;
}

static void aqua_read_smn(struct amdgpu_device *adev,
                          struct amdgpu_smn_reg_data *regdata,
                          uint64_t smn_addr)
{
        regdata->addr = smn_addr;
        regdata->value = RREG32_PCIE(smn_addr);
}

struct aqua_reg_list {
        uint64_t start_addr;
        uint32_t num_regs;
        uint32_t incrx;
};

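/* Consecutive 32-bit registers sit one dword (4 bytes) apart */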
#define DW_ADDR_INCR    4

static void aqua_read_smn_ext(struct amdgpu_device *adev,
                              struct amdgpu_smn_reg_data *regdata,
                              uint64_t smn_addr, int i)
{
        regdata->addr =
                smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
        regdata->value = RREG32_PCIE_EXT(regdata->addr);
}

#define smnreg_0x1A340218       0x1A340218
#define smnreg_0x1A3402E4       0x1A3402E4
#define smnreg_0x1A340294       0x1A340294
#define smnreg_0x1A380088       0x1A380088

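/* Total register count across pcie_reg_addrs: 1 + 1 + 6 + 6 */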
#define NUM_PCIE_SMN_REGS       14

static struct aqua_reg_list pcie_reg_addrs[] = {
        { smnreg_0x1A340218, 1, 0 },
        { smnreg_0x1A3402E4, 1, 0 },
        { smnreg_0x1A340294, 6, DW_ADDR_INCR },
        { smnreg_0x1A380088, 6, DW_ADDR_INCR },
};

static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
                                             void *buf, size_t max_size)
{
        struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state;
        uint32_t start_addr, incrx, num_regs, szbuf;
        struct amdgpu_regs_pcie_v1_0 *pcie_regs;
        struct amdgpu_smn_reg_data *reg_data;
        struct pci_dev *us_pdev, *ds_pdev;
        int aer_cap, r, n;

        if (!buf || !max_size)
                return -EINVAL;

        pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf;

        szbuf = sizeof(*pcie_reg_state) +
                amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS);
        /* Only one instance of pcie regs */
        if (max_size < szbuf)
                return -EOVERFLOW;

        pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf +
                                                     sizeof(*pcie_reg_state));
        pcie_regs->inst_header.instance = 0;
        pcie_regs->inst_header.state = AMDGPU_INST_S_OK;
        pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS;

        reg_data = pcie_regs->smn_reg_values;

        for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) {
                start_addr = pcie_reg_addrs[r].start_addr;
                incrx = pcie_reg_addrs[r].incrx;
                num_regs = pcie_reg_addrs[r].num_regs;
                for (n = 0; n < num_regs; n++) {
                        aqua_read_smn(adev, reg_data, start_addr + n * incrx);
                        ++reg_data;
                }
        }

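        /*
         * Walk up the PCI hierarchy: the GPU's parent is the switch
         * downstream port, and its parent in turn is the upstream port
         * whose link/AER status is captured below.
         */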
        ds_pdev = pci_upstream_bridge(adev->pdev);
        us_pdev = pci_upstream_bridge(ds_pdev);

        pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA,
                                  &pcie_regs->device_status);
        pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA,
                                  &pcie_regs->link_status);

        aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR);
        if (aer_cap) {
                pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS,
                                      &pcie_regs->pcie_corr_err_status);
                pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS,
                                      &pcie_regs->pcie_uncorr_err_status);
        }

        pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS,
                              &pcie_regs->sub_bus_number_latency);

        pcie_reg_state->common_header.structure_size = szbuf;
        pcie_reg_state->common_header.format_revision = 1;
        pcie_reg_state->common_header.content_revision = 0;
        pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE;
        pcie_reg_state->common_header.num_instances = 1;

        return pcie_reg_state->common_header.structure_size;
}

#define smnreg_0x11A00050       0x11A00050
#define smnreg_0x11A00180       0x11A00180
#define smnreg_0x11A00070       0x11A00070
#define smnreg_0x11A00200       0x11A00200
#define smnreg_0x11A0020C       0x11A0020C
#define smnreg_0x11A00210       0x11A00210
#define smnreg_0x11A00108       0x11A00108

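/*
 * Per-link register apertures are spaced 0x100000 apart, so the link
 * index is encoded at bit 20 of the SMN address.
 */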
#define XGMI_LINK_REG(smnreg, l) ((smnreg) | (l << 20))

#define NUM_XGMI_SMN_REGS 25

static struct aqua_reg_list xgmi_reg_addrs[] = {
        { smnreg_0x11A00050, 1, 0 },
        { smnreg_0x11A00180, 16, DW_ADDR_INCR },
        { smnreg_0x11A00070, 4, DW_ADDR_INCR },
        { smnreg_0x11A00200, 1, 0 },
        { smnreg_0x11A0020C, 1, 0 },
        { smnreg_0x11A00210, 1, 0 },
        { smnreg_0x11A00108, 1, 0 },
};

static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
                                             void *buf, size_t max_size)
{
        struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
        uint32_t start_addr, incrx, num_regs, szbuf;
        struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
        struct amdgpu_smn_reg_data *reg_data;
        const int max_xgmi_instances = 8;
        int inst = 0, i, j, r, n;
        const int xgmi_inst = 2;
        void *p;

        if (!buf || !max_size)
                return -EINVAL;

        xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;

        szbuf = sizeof(*xgmi_reg_state) +
                amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
                                    NUM_XGMI_SMN_REGS);
        /* Check that the buffer can hold all XGMI instances */
        if (max_size < szbuf)
                return -EOVERFLOW;

        p = &xgmi_reg_state->xgmi_state_regs[0];
        for_each_inst(i, adev->aid_mask) {
                for (j = 0; j < xgmi_inst; ++j) {
                        xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
                        xgmi_regs->inst_header.instance = inst++;

                        xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
                        xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;

                        reg_data = xgmi_regs->smn_reg_values;

                        for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
                                start_addr = xgmi_reg_addrs[r].start_addr;
                                incrx = xgmi_reg_addrs[r].incrx;
                                num_regs = xgmi_reg_addrs[r].num_regs;

                                for (n = 0; n < num_regs; n++) {
                                        aqua_read_smn_ext(
                                                adev, reg_data,
                                                XGMI_LINK_REG(start_addr, j) +
                                                        n * incrx,
                                                i);
                                        ++reg_data;
                                }
                        }
                        p = reg_data;
                }
        }

        xgmi_reg_state->common_header.structure_size = szbuf;
        xgmi_reg_state->common_header.format_revision = 1;
        xgmi_reg_state->common_header.content_revision = 0;
        xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
        xgmi_reg_state->common_header.num_instances = max_xgmi_instances;

        return xgmi_reg_state->common_header.structure_size;
}

#define smnreg_0x11C00070       0x11C00070
#define smnreg_0x11C00210       0x11C00210

static struct aqua_reg_list wafl_reg_addrs[] = {
        { smnreg_0x11C00070, 4, DW_ADDR_INCR },
        { smnreg_0x11C00210, 1, 0 },
};

#define WAFL_LINK_REG(smnreg, l) ((smnreg) | (l << 20))

#define NUM_WAFL_SMN_REGS 5

static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
                                             void *buf, size_t max_size)
{
        struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
        uint32_t start_addr, incrx, num_regs, szbuf;
        struct amdgpu_regs_wafl_v1_0 *wafl_regs;
        struct amdgpu_smn_reg_data *reg_data;
        const int max_wafl_instances = 8;
        int inst = 0, i, j, r, n;
        const int wafl_inst = 2;
        void *p;

        if (!buf || !max_size)
                return -EINVAL;

        wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;

        szbuf = sizeof(*wafl_reg_state) +
                amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
                                    NUM_WAFL_SMN_REGS);

        if (max_size < szbuf)
                return -EOVERFLOW;

        p = &wafl_reg_state->wafl_state_regs[0];
        for_each_inst(i, adev->aid_mask) {
                for (j = 0; j < wafl_inst; ++j) {
                        wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
                        wafl_regs->inst_header.instance = inst++;

                        wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
                        wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;

                        reg_data = wafl_regs->smn_reg_values;

                        for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
                                start_addr = wafl_reg_addrs[r].start_addr;
                                incrx = wafl_reg_addrs[r].incrx;
                                num_regs = wafl_reg_addrs[r].num_regs;
                                for (n = 0; n < num_regs; n++) {
                                        aqua_read_smn_ext(
                                                adev, reg_data,
                                                WAFL_LINK_REG(start_addr, j) +
                                                        n * incrx,
                                                i);
                                        ++reg_data;
                                }
                        }
                        p = reg_data;
                }
        }

        wafl_reg_state->common_header.structure_size = szbuf;
        wafl_reg_state->common_header.format_revision = 1;
        wafl_reg_state->common_header.content_revision = 0;
        wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
        wafl_reg_state->common_header.num_instances = max_wafl_instances;

        return wafl_reg_state->common_header.structure_size;
}

#define smnreg_0x1B311060 0x1B311060
#define smnreg_0x1B411060 0x1B411060
#define smnreg_0x1B511060 0x1B511060
#define smnreg_0x1B611060 0x1B611060

#define smnreg_0x1C307120 0x1C307120
#define smnreg_0x1C317120 0x1C317120

#define smnreg_0x1C320830 0x1C320830
#define smnreg_0x1C380830 0x1C380830
#define smnreg_0x1C3D0830 0x1C3D0830
#define smnreg_0x1C420830 0x1C420830

#define smnreg_0x1C320100 0x1C320100
#define smnreg_0x1C380100 0x1C380100
#define smnreg_0x1C3D0100 0x1C3D0100
#define smnreg_0x1C420100 0x1C420100

#define smnreg_0x1B310500 0x1B310500
#define smnreg_0x1C300400 0x1C300400

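/*
 * Address strides between successive register apertures; the names
 * suggest per-CAKE (0x11000), per-link (0x100000) and per-CP (0x10000)
 * spacing (assumption inferred from how the lists below use them).
 */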
#define USR_CAKE_INCR 0x11000
#define USR_LINK_INCR 0x100000
#define USR_CP_INCR 0x10000

#define NUM_USR_SMN_REGS        20

static struct aqua_reg_list usr_reg_addrs[] = {
        { smnreg_0x1B311060, 4, DW_ADDR_INCR },
        { smnreg_0x1B411060, 4, DW_ADDR_INCR },
        { smnreg_0x1B511060, 4, DW_ADDR_INCR },
        { smnreg_0x1B611060, 4, DW_ADDR_INCR },
        { smnreg_0x1C307120, 2, DW_ADDR_INCR },
        { smnreg_0x1C317120, 2, DW_ADDR_INCR },
};

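/* Total register count across usr1_reg_addrs: 6+5+5+4 + 6+5+5+4 + 4 + 2 */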
#define NUM_USR1_SMN_REGS       46
static struct aqua_reg_list usr1_reg_addrs[] = {
        { smnreg_0x1C320830, 6, USR_CAKE_INCR },
        { smnreg_0x1C380830, 5, USR_CAKE_INCR },
        { smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
        { smnreg_0x1C420830, 4, USR_CAKE_INCR },
        { smnreg_0x1C320100, 6, USR_CAKE_INCR },
        { smnreg_0x1C380100, 5, USR_CAKE_INCR },
        { smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
        { smnreg_0x1C420100, 4, USR_CAKE_INCR },
        { smnreg_0x1B310500, 4, USR_LINK_INCR },
        { smnreg_0x1C300400, 2, USR_CP_INCR },
};

static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
                                            void *buf, size_t max_size,
                                            int reg_state)
{
        uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
        struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
        struct amdgpu_regs_usr_v1_0 *usr_regs;
        struct amdgpu_smn_reg_data *reg_data;
        const int max_usr_instances = 4;
        struct aqua_reg_list *reg_addrs;
        int inst = 0, i, n, r, arr_size;
        void *p;

        if (!buf || !max_size)
                return -EINVAL;

        switch (reg_state) {
        case AMDGPU_REG_STATE_TYPE_USR:
                arr_size = ARRAY_SIZE(usr_reg_addrs);
                reg_addrs = usr_reg_addrs;
                num_smn = NUM_USR_SMN_REGS;
                break;
        case AMDGPU_REG_STATE_TYPE_USR_1:
                arr_size = ARRAY_SIZE(usr1_reg_addrs);
                reg_addrs = usr1_reg_addrs;
                num_smn = NUM_USR1_SMN_REGS;
                break;
        default:
                return -EINVAL;
        }

        usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;

        szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
                                                             sizeof(*usr_regs),
                                                             num_smn);
        if (max_size < szbuf)
                return -EOVERFLOW;

        p = &usr_reg_state->usr_state_regs[0];
        for_each_inst(i, adev->aid_mask) {
                usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
                usr_regs->inst_header.instance = inst++;
                usr_regs->inst_header.state = AMDGPU_INST_S_OK;
                usr_regs->inst_header.num_smn_regs = num_smn;
                reg_data = usr_regs->smn_reg_values;

                for (r = 0; r < arr_size; r++) {
                        start_addr = reg_addrs[r].start_addr;
                        incrx = reg_addrs[r].incrx;
                        num_regs = reg_addrs[r].num_regs;
                        for (n = 0; n < num_regs; n++) {
                                aqua_read_smn_ext(adev, reg_data,
                                                  start_addr + n * incrx, i);
                                reg_data++;
                        }
                }
                p = reg_data;
        }

        usr_reg_state->common_header.structure_size = szbuf;
        usr_reg_state->common_header.format_revision = 1;
        usr_reg_state->common_header.content_revision = 0;
        usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR;
        usr_reg_state->common_header.num_instances = max_usr_instances;

        return usr_reg_state->common_header.structure_size;
}

ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
                                    enum amdgpu_reg_state reg_state, void *buf,
                                    size_t max_size)
{
        ssize_t size;

        switch (reg_state) {
        case AMDGPU_REG_STATE_TYPE_PCIE:
                size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
                break;
        case AMDGPU_REG_STATE_TYPE_XGMI:
                size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
                break;
        case AMDGPU_REG_STATE_TYPE_WAFL:
                size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
                break;
        case AMDGPU_REG_STATE_TYPE_USR:
                size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
                                                    AMDGPU_REG_STATE_TYPE_USR);
                break;
        case AMDGPU_REG_STATE_TYPE_USR_1:
                size = aqua_vanjaram_read_usr_state(
                        adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
                break;
        default:
                return -EINVAL;
        }

        return size;
}