/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"

#include "soc15_common.h"
#include "amdgpu_reg_state.h"
#include "amdgpu_xcp.h"
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"

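/*
 * Build the instance mask owned by partition xcp_id: num_inst consecutive
 * bits shifted into that partition's slot. For example, XCP_INST_MASK(2, 1)
 * is GENMASK(1, 0) << 2 == 0xc, i.e. instances 2 and 3.
 */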
#define XCP_INST_MASK(num_inst, xcp_id)                                        \
        (num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)

#define AMDGPU_XCP_OPS_KFD      (1 << 0)

void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
        int i;

        adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;

        adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;

        adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
        adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
        adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;

        adev->doorbell_index.sdma_doorbell_range = 20;
        for (i = 0; i < adev->sdma.num_instances; i++)
                adev->doorbell_index.sdma_engine[i] =
                        AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
                        i * (adev->doorbell_index.sdma_doorbell_range >> 1);

        adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
        adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;

        adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
        adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;

        adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}

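/*
 * With more compute partitions than VCN instances, a VCN instance is
 * shared by neighboring partitions (see the xcp_id + 1 case in
 * aqua_vanjaram_xcp_sched_list_update() below).
 */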
static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
{
        return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
}

static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
                             uint32_t inst_idx, struct amdgpu_ring *ring)
{
        int xcp_id;
        enum AMDGPU_XCP_IP_BLOCK ip_blk;
        uint32_t inst_mask;

        ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
                adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
        if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
                return;

        inst_mask = 1 << inst_idx;

        switch (ring->funcs->type) {
        case AMDGPU_HW_IP_GFX:
        case AMDGPU_RING_TYPE_COMPUTE:
        case AMDGPU_RING_TYPE_KIQ:
                ip_blk = AMDGPU_XCP_GFX;
                break;
        case AMDGPU_RING_TYPE_SDMA:
                ip_blk = AMDGPU_XCP_SDMA;
                break;
        case AMDGPU_RING_TYPE_VCN_ENC:
        case AMDGPU_RING_TYPE_VCN_JPEG:
                ip_blk = AMDGPU_XCP_VCN;
                break;
        default:
                DRM_ERROR("Unsupported ring type %d!", ring->funcs->type);
                return;
        }

        for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
                if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
                        ring->xcp_id = xcp_id;
                        dev_dbg(adev->dev, "ring:%s xcp_id:%u", ring->name,
                                ring->xcp_id);
                        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
                                adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
                        break;
                }
        }
}

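/*
 * Append a ring's GPU scheduler to the scheduler list kept per XCP, ring
 * type and hardware priority; aqua_vanjaram_select_scheds() hands these
 * lists out when a context is bound to a partition.
 */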
static void aqua_vanjaram_xcp_gpu_sched_update(
                struct amdgpu_device *adev,
                struct amdgpu_ring *ring,
                unsigned int sel_xcp_id)
{
        unsigned int *num_gpu_sched;

        num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
                        .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
        adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
                        .sched[(*num_gpu_sched)++] = &ring->sched;
        DRM_DEBUG("%s: [%d] gpu_sched[%d][%d] = %d", ring->name,
                        sel_xcp_id, ring->funcs->type,
                        ring->hw_prio, *num_gpu_sched);
}

static int aqua_vanjaram_xcp_sched_list_update(
                struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        int i;

        for (i = 0; i < MAX_XCP; i++) {
                atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
                memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
        }

        if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
                return 0;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                ring = adev->rings[i];
                if (!ring || !ring->sched.ready || ring->no_scheduler)
                        continue;

                aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);

                /* VCN may be shared by two partitions under CPX MODE in certain
                 * configs.
                 */
                if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
                     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
                    aqua_vanjaram_xcp_vcn_shared(adev))
                        aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
        }

        return 0;
}

static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->num_rings; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
                    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
                        aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
                else
                        aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
        }

        return aqua_vanjaram_xcp_sched_list_update(adev);
}

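/*
 * Pick the scheduler list for a submission: a file that is not yet pinned
 * to a partition (AMDGPU_XCP_NO_PARTITION) is balanced onto the XCP with
 * the lowest ref_cnt, then the partition's gpu_sched[hw_ip][hw_prio] list
 * is returned.
 */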
static int aqua_vanjaram_select_scheds(
                struct amdgpu_device *adev,
                u32 hw_ip,
                u32 hw_prio,
                struct amdgpu_fpriv *fpriv,
                unsigned int *num_scheds,
                struct drm_gpu_scheduler ***scheds)
{
        u32 sel_xcp_id;
        int i;

        if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
                u32 least_ref_cnt = ~0;

                fpriv->xcp_id = 0;
                for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
                        u32 total_ref_cnt;

                        total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
                        if (total_ref_cnt < least_ref_cnt) {
                                fpriv->xcp_id = i;
                                least_ref_cnt = total_ref_cnt;
                        }
                }
        }
        sel_xcp_id = fpriv->xcp_id;

        if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
                *num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
                *scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
                atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
                DRM_DEBUG("Selected partition #%d", sel_xcp_id);
        } else {
                DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
                return -ENOENT;
        }

        return 0;
}

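/*
 * Map a logical instance to a physical (device) instance. Only GC, SDMA
 * and VCN/JPEG have a lookup table (built from the instance masks in
 * aqua_vanjaram_ip_map_init()); all other IPs are identity-mapped.
 */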
static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
                                         enum amd_hw_ip_block_type block,
                                         int8_t inst)
{
        int8_t dev_inst;

        switch (block) {
        case GC_HWIP:
        case SDMA0_HWIP:
        /* JPEG uses the same lookup as VCN, since JPEG is only an alias of VCN */
        case VCN_HWIP:
                dev_inst = adev->ip_map.dev_inst[block][inst];
                break;
        default:
                /* For the rest of the IPs, no lookup is required.
                 * Assume 'logical instance == physical instance' for all configs. */
                dev_inst = inst;
                break;
        }

        return dev_inst;
}

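/*
 * Translate a whole logical instance mask bit by bit. For example, if
 * logical instances 0 and 1 map to physical instances 2 and 3, a logical
 * mask of 0x3 becomes a device mask of 0xc.
 */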
static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
                                         enum amd_hw_ip_block_type block,
                                         uint32_t mask)
{
        uint32_t dev_mask = 0;
        int8_t log_inst, dev_inst;

        while (mask) {
                log_inst = ffs(mask) - 1;
                dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
                dev_mask |= (1 << dev_inst);
                mask &= ~(1 << log_inst);
        }

        return dev_mask;
}

static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
                                          enum amd_hw_ip_block_type ip_block,
                                          uint32_t inst_mask)
{
        int l = 0, i;

        while (inst_mask) {
                i = ffs(inst_mask) - 1;
                adev->ip_map.dev_inst[ip_block][l++] = i;
                inst_mask &= ~(1 << i);
        }
        for (; l < HWIP_MAX_INSTANCE; l++)
                adev->ip_map.dev_inst[ip_block][l] = -1;
}

void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
{
        u32 ip_map[][2] = {
                { GC_HWIP, adev->gfx.xcc_mask },
                { SDMA0_HWIP, adev->sdma.sdma_mask },
                { VCN_HWIP, adev->vcn.inst_mask },
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
                aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);

        adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
        adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
}

/* Fixed pattern for SMN addressing on different AIDs:
 *   bit[34]: indicates cross-AID access
 *   bit[33:32]: indicates the target AID id
 * The AID id range is 0 ~ 3, as the maximum AID number is 4.
 */
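/* e.g. ext_id 2 -> (1ULL << 34) | (2ULL << 32) == 0x6_0000_0000 */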
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
{
        u64 ext_offset;

        /* local routing and bit[34:32] will be zeros */
        if (ext_id == 0)
                return 0;

        /* Initiated from the host, accesses to all non-zero AIDs are cross traffic */
        ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);

        return ext_offset;
}

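/*
 * Derive the partition mode from the XCC topology: with N XCCs and M XCCs
 * per partition, N/M partitions means SPX (1), DPX (2), TPX (3) or QPX (4);
 * a single XCC per partition is CPX regardless of the total count.
 */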
static enum amdgpu_gfx_partition
__aqua_vanjaram_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
        struct amdgpu_device *adev = xcp_mgr->adev;
        int num_xcc, num_xcc_per_xcp = 0, mode = 0;

        num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
        if (adev->gfx.funcs->get_xccs_per_xcp)
                num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
        if ((num_xcc_per_xcp) && (num_xcc % num_xcc_per_xcp == 0))
                mode = num_xcc / num_xcc_per_xcp;

        if (num_xcc_per_xcp == 1)
                return AMDGPU_CPX_PARTITION_MODE;

        switch (mode) {
        case 1:
                return AMDGPU_SPX_PARTITION_MODE;
        case 2:
                return AMDGPU_DPX_PARTITION_MODE;
        case 3:
                return AMDGPU_TPX_PARTITION_MODE;
        case 4:
                return AMDGPU_QPX_PARTITION_MODE;
        default:
                return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
        }
}

static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
        enum amdgpu_gfx_partition derv_mode,
                mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
        struct amdgpu_device *adev = xcp_mgr->adev;

        derv_mode = __aqua_vanjaram_calc_xcp_mode(xcp_mgr);

        if (amdgpu_sriov_vf(adev))
                return derv_mode;

        if (adev->nbio.funcs->get_compute_partition_mode) {
                mode = adev->nbio.funcs->get_compute_partition_mode(adev);
                if (mode != derv_mode)
                        dev_warn(
                                adev->dev,
                                "Mismatch in compute partition mode - reported : %d derived : %d",
                                mode, derv_mode);
        }

        return mode;
}

static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
        int num_xcc, num_xcc_per_xcp = 0;

        num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

        switch (mode) {
        case AMDGPU_SPX_PARTITION_MODE:
                num_xcc_per_xcp = num_xcc;
                break;
        case AMDGPU_DPX_PARTITION_MODE:
                num_xcc_per_xcp = num_xcc / 2;
                break;
        case AMDGPU_TPX_PARTITION_MODE:
                num_xcc_per_xcp = num_xcc / 3;
                break;
        case AMDGPU_QPX_PARTITION_MODE:
                num_xcc_per_xcp = num_xcc / 4;
                break;
        case AMDGPU_CPX_PARTITION_MODE:
                num_xcc_per_xcp = 1;
                break;
        }

        return num_xcc_per_xcp;
}

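/*
 * Describe which IP instances belong to partition xcp_id: XCCs are split
 * evenly across partitions, SDMA and VCN are divided with DIV_ROUND_UP,
 * and when there are more partitions than VCN instances one VCN instance
 * is shared by num_xcp / num_vcn partitions.
 */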
static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
                                    enum AMDGPU_XCP_IP_BLOCK ip_id,
                                    struct amdgpu_xcp_ip *ip)
{
        struct amdgpu_device *adev = xcp_mgr->adev;
        int num_sdma, num_vcn, num_shared_vcn, num_xcp;
        int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;

        num_sdma = adev->sdma.num_instances;
        num_vcn = adev->vcn.num_vcn_inst;
        num_shared_vcn = 1;

        num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
        num_xcp = NUM_XCC(adev->gfx.xcc_mask) / num_xcc_xcp;

        switch (xcp_mgr->mode) {
        case AMDGPU_SPX_PARTITION_MODE:
        case AMDGPU_DPX_PARTITION_MODE:
        case AMDGPU_TPX_PARTITION_MODE:
        case AMDGPU_QPX_PARTITION_MODE:
        case AMDGPU_CPX_PARTITION_MODE:
                num_sdma_xcp = DIV_ROUND_UP(num_sdma, num_xcp);
                num_vcn_xcp = DIV_ROUND_UP(num_vcn, num_xcp);
                break;
        default:
                return -EINVAL;
        }

        if (num_vcn && num_xcp > num_vcn)
                num_shared_vcn = num_xcp / num_vcn;

        switch (ip_id) {
        case AMDGPU_XCP_GFXHUB:
                ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
                ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
                break;
        case AMDGPU_XCP_GFX:
                ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
                ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
                break;
        case AMDGPU_XCP_SDMA:
                ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
                ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
                break;
        case AMDGPU_XCP_VCN:
                ip->inst_mask =
                        XCP_INST_MASK(num_vcn_xcp, xcp_id / num_shared_vcn);
                /* TODO : Assign IP funcs */
                break;
        default:
                return -EINVAL;
        }

        ip->ip_id = ip_id;

        return 0;
}

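/*
 * Report, for a prospective partition mode, how many instances of each
 * resource (XCC, SDMA, VCN decode, JPEG) one partition would own, and how
 * many partitions would share an instance when there are fewer instances
 * than partitions. JPEG is reported in rings, i.e. instances scaled by
 * num_jpeg_rings.
 */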
static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
                                          int mode,
                                          struct amdgpu_xcp_cfg *xcp_cfg)
{
        struct amdgpu_device *adev = xcp_mgr->adev;
        int max_res[AMDGPU_XCP_RES_MAX] = {};
        bool res_lt_xcp;
        int num_xcp, i;

        if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
                return -EINVAL;

        max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
        max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
        max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
        max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;

        switch (mode) {
        case AMDGPU_SPX_PARTITION_MODE:
                num_xcp = 1;
                break;
        case AMDGPU_DPX_PARTITION_MODE:
                num_xcp = 2;
                break;
        case AMDGPU_TPX_PARTITION_MODE:
                num_xcp = 3;
                break;
        case AMDGPU_QPX_PARTITION_MODE:
                num_xcp = 4;
                break;
        case AMDGPU_CPX_PARTITION_MODE:
                num_xcp = NUM_XCC(adev->gfx.xcc_mask);
                break;
        default:
                return -EINVAL;
        }

        xcp_cfg->num_res = ARRAY_SIZE(max_res);

        for (i = 0; i < xcp_cfg->num_res; i++) {
                res_lt_xcp = max_res[i] < num_xcp;
                xcp_cfg->xcp_res[i].id = i;
                xcp_cfg->xcp_res[i].num_inst =
                        res_lt_xcp ? 1 : max_res[i] / num_xcp;
                xcp_cfg->xcp_res[i].num_inst =
                        i == AMDGPU_XCP_RES_JPEG ?
                        xcp_cfg->xcp_res[i].num_inst *
                        adev->jpeg.num_jpeg_rings : xcp_cfg->xcp_res[i].num_inst;
                xcp_cfg->xcp_res[i].num_shared =
                        res_lt_xcp ? num_xcp / max_res[i] : 1;
        }

        return 0;
}

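/*
 * Pick the compute partition mode matching the current memory
 * partitioning: one memory partition maps to SPX, one per XCC to CPX; the
 * half-XCC and two-partition cases depend on whether the device is an APU.
 */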
static enum amdgpu_gfx_partition
__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
        struct amdgpu_device *adev = xcp_mgr->adev;
        int num_xcc;

        num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

        if (adev->gmc.num_mem_partitions == 1)
                return AMDGPU_SPX_PARTITION_MODE;

        if (adev->gmc.num_mem_partitions == num_xcc)
                return AMDGPU_CPX_PARTITION_MODE;

        if (adev->gmc.num_mem_partitions == num_xcc / 2)
                return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
                                                    AMDGPU_CPX_PARTITION_MODE;

        if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
                return AMDGPU_DPX_PARTITION_MODE;

        return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
                                          enum amdgpu_gfx_partition mode)
{
        struct amdgpu_device *adev = xcp_mgr->adev;
        int num_xcc, num_xccs_per_xcp;

        num_xcc = NUM_XCC(adev->gfx.xcc_mask);
        switch (mode) {
        case AMDGPU_SPX_PARTITION_MODE:
                return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
        case AMDGPU_DPX_PARTITION_MODE:
                return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0;
        case AMDGPU_TPX_PARTITION_MODE:
                return (adev->gmc.num_mem_partitions == 1 ||
                        adev->gmc.num_mem_partitions == 3) &&
                       ((num_xcc % 3) == 0);
        case AMDGPU_QPX_PARTITION_MODE:
                num_xccs_per_xcp = num_xcc / 4;
                return (adev->gmc.num_mem_partitions == 1 ||
                        adev->gmc.num_mem_partitions == 4) &&
                       (num_xccs_per_xcp >= 2);
        case AMDGPU_CPX_PARTITION_MODE:
                return ((num_xcc > 1) &&
                       (adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 4) &&
                       (num_xcc % adev->gmc.num_mem_partitions) == 0);
        default:
                return false;
        }
}

static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
        /* TODO:
         * Stop user queues and threads, and make sure GPU is empty of work.
         */

        if (flags & AMDGPU_XCP_OPS_KFD)
                amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);

        return 0;
}

static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
        int ret = 0;

        if (flags & AMDGPU_XCP_OPS_KFD) {
                amdgpu_amdkfd_device_probe(xcp_mgr->adev);
                amdgpu_amdkfd_device_init(xcp_mgr->adev);
                /* If KFD init failed, return failure */
                if (!xcp_mgr->adev->kfd.init_complete)
                        ret = -EIO;
        }

        return ret;
}

static void
__aqua_vanjaram_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
{
        struct amdgpu_device *adev = xcp_mgr->adev;

        xcp_mgr->supp_xcp_modes = 0;

        switch (NUM_XCC(adev->gfx.xcc_mask)) {
        case 8:
                xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
                                          BIT(AMDGPU_DPX_PARTITION_MODE) |
                                          BIT(AMDGPU_QPX_PARTITION_MODE) |
                                          BIT(AMDGPU_CPX_PARTITION_MODE);
                break;
        case 6:
                xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
                                          BIT(AMDGPU_TPX_PARTITION_MODE) |
                                          BIT(AMDGPU_CPX_PARTITION_MODE);
                break;
        case 4:
                xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
                                          BIT(AMDGPU_DPX_PARTITION_MODE) |
                                          BIT(AMDGPU_CPX_PARTITION_MODE);
                break;
        /* this config seems to exist only in the emulation phase */
        case 2:
                xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
                                          BIT(AMDGPU_CPX_PARTITION_MODE);
                break;
        case 1:
                xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
                                          BIT(AMDGPU_CPX_PARTITION_MODE);
                break;
        default:
                break;
        }
}

static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
        int mode;

        xcp_mgr->avail_xcp_modes = 0;

        for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
                if (__aqua_vanjaram_is_valid_mode(xcp_mgr, mode))
                        xcp_mgr->avail_xcp_modes |= BIT(mode);
        }
}

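/*
 * Switch the compute partition mode: resolve AUTO to a concrete mode,
 * validate it, quiesce and lock KFD (unless running as part of a reset),
 * reprogram the XCC grouping through the GFX IP callback, rebuild the XCP
 * bookkeeping for the new partition count and re-init KFD.
 */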
static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
                                               int mode, int *num_xcps)
{
        int num_xcc_per_xcp, num_xcc, ret;
        struct amdgpu_device *adev;
        u32 flags = 0;

        adev = xcp_mgr->adev;
        num_xcc = NUM_XCC(adev->gfx.xcc_mask);

        if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
                mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
                if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
                        dev_err(adev->dev,
                                "Invalid config, no compatible compute partition mode found, available memory partitions: %d",
                                adev->gmc.num_mem_partitions);
                        return -EINVAL;
                }
        } else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
                dev_err(adev->dev,
                        "Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
                        amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
                return -EINVAL;
        }

        if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
                flags |= AMDGPU_XCP_OPS_KFD;

        if (flags & AMDGPU_XCP_OPS_KFD) {
                ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
                if (ret)
                        goto out;
        }

        ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
        if (ret)
                goto unlock;

        num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
        if (adev->gfx.funcs->switch_partition_mode)
                adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
                                                       num_xcc_per_xcp);

        /* Init info about new xcps */
        *num_xcps = num_xcc / num_xcc_per_xcp;
        amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);

        ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
        if (!ret)
                __aqua_vanjaram_update_available_partition_mode(xcp_mgr);
unlock:
        if (flags & AMDGPU_XCP_OPS_KFD)
                amdgpu_amdkfd_unlock_kfd(adev);
out:
        return ret;
}

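/*
 * Derive the memory partition id for an XCC: fold the XCC into its compute
 * partition first, then map compute partitions onto memory partitions
 * (several XCPs may share one memory partition).
 */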
static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
                                          int xcc_id, uint8_t *mem_id)
{
        /* memory/spatial modes validation check is already done */
        *mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
        *mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;

        return 0;
}

static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
                                        struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
        struct amdgpu_numa_info numa_info;
        struct amdgpu_device *adev;
        uint32_t xcc_mask;
        int r, i, xcc_id;

        adev = xcp_mgr->adev;
        /* TODO: BIOS is not returning the right info now
         * Check on this later
         */
        /*
        if (adev->gmc.gmc_funcs->query_mem_partition_mode)
                mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
        */
        if (adev->gmc.num_mem_partitions == 1) {
                /* Only one range */
                *mem_id = 0;
                return 0;
        }

        r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
        if (r || !xcc_mask)
                return -EINVAL;

        xcc_id = ffs(xcc_mask) - 1;
        if (!adev->gmc.is_app_apu)
                return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);

        r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);

        if (r)
                return r;

        r = -EINVAL;
        for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
                if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
                        *mem_id = i;
                        r = 0;
                        break;
                }
        }

        return r;
}

static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
                                     enum AMDGPU_XCP_IP_BLOCK ip_id,
                                     struct amdgpu_xcp_ip *ip)
{
        if (!ip)
                return -EINVAL;

        return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
}

struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
        .switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
        .query_partition_mode = &aqua_vanjaram_query_partition_mode,
        .get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
        .get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
        .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
        .select_scheds = &aqua_vanjaram_select_scheds,
        .update_partition_sched_list =
                &aqua_vanjaram_update_partition_sched_list
};

static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
{
        int ret;

        if (amdgpu_sriov_vf(adev))
                aqua_vanjaram_xcp_funcs.switch_partition_mode = NULL;

        ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
                                  &aqua_vanjaram_xcp_funcs);
        if (ret)
                return ret;

        __aqua_vanjaram_update_supported_modes(adev->xcp_mgr);
        /* TODO: Default memory node affinity init */

        return ret;
}

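/*
 * Populate the SOC topology: set one bit in adev->aid_mask per AID with a
 * usable SDMA group (all four instances, or the 0x3/0xc half), derive the
 * VCN/JPEG instance counts from their instance masks, then bring up the
 * XCP manager and the logical-to-physical IP maps.
 */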
int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
        u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask;
        int ret, i;

        /* generally 1 AID supports 4 instances */
        adev->sdma.num_inst_per_aid = 4;
        adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);

        adev->aid_mask = i = 1;
        inst_mask >>= adev->sdma.num_inst_per_aid;

        for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
             inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
                avail_inst = inst_mask & mask;
                if (avail_inst == mask || avail_inst == 0x3 ||
                    avail_inst == 0xc)
                        adev->aid_mask |= (1 << i);
        }

        /* Harvest config is not used for aqua vanjaram. VCN and JPEGs will be
         * addressed based on logical instance ids.
         */
        adev->vcn.harvest_config = 0;
        adev->vcn.num_inst_per_aid = 1;
        adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
        adev->jpeg.harvest_config = 0;
        adev->jpeg.num_inst_per_aid = 1;
        adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);

        ret = aqua_vanjaram_xcp_mgr_init(adev);
        if (ret)
                return ret;

        aqua_vanjaram_ip_map_init(adev);

        return 0;
}

static void aqua_read_smn(struct amdgpu_device *adev,
                          struct amdgpu_smn_reg_data *regdata,
                          uint64_t smn_addr)
{
        regdata->addr = smn_addr;
        regdata->value = RREG32_PCIE(smn_addr);
}

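/*
 * A register range to dump: num_regs registers starting at start_addr,
 * spaced incrx bytes apart (DW_ADDR_INCR below for consecutive dwords).
 */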
struct aqua_reg_list {
        uint64_t start_addr;
        uint32_t num_regs;
        uint32_t incrx;
};

#define DW_ADDR_INCR    4

static void aqua_read_smn_ext(struct amdgpu_device *adev,
                              struct amdgpu_smn_reg_data *regdata,
                              uint64_t smn_addr, int i)
{
        regdata->addr =
                smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
        regdata->value = RREG32_PCIE_EXT(regdata->addr);
}

#define smnreg_0x1A340218       0x1A340218
#define smnreg_0x1A3402E4       0x1A3402E4
#define smnreg_0x1A340294       0x1A340294
#define smnreg_0x1A380088       0x1A380088

#define NUM_PCIE_SMN_REGS       14

static struct aqua_reg_list pcie_reg_addrs[] = {
        { smnreg_0x1A340218, 1, 0 },
        { smnreg_0x1A3402E4, 1, 0 },
        { smnreg_0x1A340294, 6, DW_ADDR_INCR },
        { smnreg_0x1A380088, 6, DW_ADDR_INCR },
};

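/*
 * Fill a caller-provided buffer with the PCIe register state: a common
 * header, one instance header, NUM_PCIE_SMN_REGS SMN values, plus link and
 * device status and AER state read from the upstream bridge's config space.
 * Returns the number of bytes written, or -EOVERFLOW if the buffer is too
 * small.
 */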
static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
                                             void *buf, size_t max_size)
{
        struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state;
        uint32_t start_addr, incrx, num_regs, szbuf;
        struct amdgpu_regs_pcie_v1_0 *pcie_regs;
        struct amdgpu_smn_reg_data *reg_data;
        struct pci_dev *us_pdev, *ds_pdev;
        int aer_cap, r, n;

        if (!buf || !max_size)
                return -EINVAL;

        pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf;

        szbuf = sizeof(*pcie_reg_state) +
                amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS);
        /* Only one instance of pcie regs */
        if (max_size < szbuf)
                return -EOVERFLOW;

        pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf +
                                                     sizeof(*pcie_reg_state));
        pcie_regs->inst_header.instance = 0;
        pcie_regs->inst_header.state = AMDGPU_INST_S_OK;
        pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS;

        reg_data = pcie_regs->smn_reg_values;

        for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) {
                start_addr = pcie_reg_addrs[r].start_addr;
                incrx = pcie_reg_addrs[r].incrx;
                num_regs = pcie_reg_addrs[r].num_regs;
                for (n = 0; n < num_regs; n++) {
                        aqua_read_smn(adev, reg_data, start_addr + n * incrx);
                        ++reg_data;
                }
        }

        ds_pdev = pci_upstream_bridge(adev->pdev);
        us_pdev = pci_upstream_bridge(ds_pdev);

        pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA,
                                  &pcie_regs->device_status);
        pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA,
                                  &pcie_regs->link_status);

        aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR);
        if (aer_cap) {
                pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS,
                                      &pcie_regs->pcie_corr_err_status);
                pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS,
                                      &pcie_regs->pcie_uncorr_err_status);
        }

        pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS,
                              &pcie_regs->sub_bus_number_latency);

        pcie_reg_state->common_header.structure_size = szbuf;
        pcie_reg_state->common_header.format_revision = 1;
        pcie_reg_state->common_header.content_revision = 0;
        pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE;
        pcie_reg_state->common_header.num_instances = 1;

        return pcie_reg_state->common_header.structure_size;
}

#define smnreg_0x11A00050       0x11A00050
#define smnreg_0x11A00180       0x11A00180
#define smnreg_0x11A00070       0x11A00070
#define smnreg_0x11A00200       0x11A00200
#define smnreg_0x11A0020C       0x11A0020C
#define smnreg_0x11A00210       0x11A00210
#define smnreg_0x11A00108       0x11A00108

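/* Per-link register copies: the link index is folded in from address bit 20 up */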
#define XGMI_LINK_REG(smnreg, l) ((smnreg) | (l << 20))

#define NUM_XGMI_SMN_REGS 25

static struct aqua_reg_list xgmi_reg_addrs[] = {
        { smnreg_0x11A00050, 1, 0 },
        { smnreg_0x11A00180, 16, DW_ADDR_INCR },
        { smnreg_0x11A00070, 4, DW_ADDR_INCR },
        { smnreg_0x11A00200, 1, 0 },
        { smnreg_0x11A0020C, 1, 0 },
        { smnreg_0x11A00210, 1, 0 },
        { smnreg_0x11A00108, 1, 0 },
};

static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
                                             void *buf, size_t max_size)
{
        struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
        uint32_t start_addr, incrx, num_regs, szbuf;
        struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
        struct amdgpu_smn_reg_data *reg_data;
        const int max_xgmi_instances = 8;
        int inst = 0, i, j, r, n;
        const int xgmi_inst = 2;
        void *p;

        if (!buf || !max_size)
                return -EINVAL;

        xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;

        szbuf = sizeof(*xgmi_reg_state) +
                amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
                                    NUM_XGMI_SMN_REGS);
        /* Make sure the buffer can hold all XGMI instances */
        if (max_size < szbuf)
                return -EOVERFLOW;

        p = &xgmi_reg_state->xgmi_state_regs[0];
        for_each_inst(i, adev->aid_mask) {
                for (j = 0; j < xgmi_inst; ++j) {
                        xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
                        xgmi_regs->inst_header.instance = inst++;

                        xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
                        xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;

                        reg_data = xgmi_regs->smn_reg_values;

                        for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
                                start_addr = xgmi_reg_addrs[r].start_addr;
                                incrx = xgmi_reg_addrs[r].incrx;
                                num_regs = xgmi_reg_addrs[r].num_regs;

                                for (n = 0; n < num_regs; n++) {
                                        aqua_read_smn_ext(
                                                adev, reg_data,
                                                XGMI_LINK_REG(start_addr, j) +
                                                        n * incrx,
                                                i);
                                        ++reg_data;
                                }
                        }
                        p = reg_data;
                }
        }

        xgmi_reg_state->common_header.structure_size = szbuf;
        xgmi_reg_state->common_header.format_revision = 1;
        xgmi_reg_state->common_header.content_revision = 0;
        xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
        xgmi_reg_state->common_header.num_instances = max_xgmi_instances;

        return xgmi_reg_state->common_header.structure_size;
}

#define smnreg_0x11C00070       0x11C00070
#define smnreg_0x11C00210       0x11C00210

static struct aqua_reg_list wafl_reg_addrs[] = {
        { smnreg_0x11C00070, 4, DW_ADDR_INCR },
        { smnreg_0x11C00210, 1, 0 },
};

#define WAFL_LINK_REG(smnreg, l) ((smnreg) | (l << 20))

#define NUM_WAFL_SMN_REGS 5

static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
                                             void *buf, size_t max_size)
{
        struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
        uint32_t start_addr, incrx, num_regs, szbuf;
        struct amdgpu_regs_wafl_v1_0 *wafl_regs;
        struct amdgpu_smn_reg_data *reg_data;
        const int max_wafl_instances = 8;
        int inst = 0, i, j, r, n;
        const int wafl_inst = 2;
        void *p;

        if (!buf || !max_size)
                return -EINVAL;

        wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;

        szbuf = sizeof(*wafl_reg_state) +
                amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
                                    NUM_WAFL_SMN_REGS);

        if (max_size < szbuf)
                return -EOVERFLOW;

        p = &wafl_reg_state->wafl_state_regs[0];
        for_each_inst(i, adev->aid_mask) {
                for (j = 0; j < wafl_inst; ++j) {
                        wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
                        wafl_regs->inst_header.instance = inst++;

                        wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
                        wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;

                        reg_data = wafl_regs->smn_reg_values;

                        for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
                                start_addr = wafl_reg_addrs[r].start_addr;
                                incrx = wafl_reg_addrs[r].incrx;
                                num_regs = wafl_reg_addrs[r].num_regs;
                                for (n = 0; n < num_regs; n++) {
                                        aqua_read_smn_ext(
                                                adev, reg_data,
                                                WAFL_LINK_REG(start_addr, j) +
                                                        n * incrx,
                                                i);
                                        ++reg_data;
                                }
                        }
                        p = reg_data;
                }
        }

        wafl_reg_state->common_header.structure_size = szbuf;
        wafl_reg_state->common_header.format_revision = 1;
        wafl_reg_state->common_header.content_revision = 0;
        wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
        wafl_reg_state->common_header.num_instances = max_wafl_instances;

        return wafl_reg_state->common_header.structure_size;
}

#define smnreg_0x1B311060 0x1B311060
#define smnreg_0x1B411060 0x1B411060
#define smnreg_0x1B511060 0x1B511060
#define smnreg_0x1B611060 0x1B611060

#define smnreg_0x1C307120 0x1C307120
#define smnreg_0x1C317120 0x1C317120

#define smnreg_0x1C320830 0x1C320830
#define smnreg_0x1C380830 0x1C380830
#define smnreg_0x1C3D0830 0x1C3D0830
#define smnreg_0x1C420830 0x1C420830

#define smnreg_0x1C320100 0x1C320100
#define smnreg_0x1C380100 0x1C380100
#define smnreg_0x1C3D0100 0x1C3D0100
#define smnreg_0x1C420100 0x1C420100

#define smnreg_0x1B310500 0x1B310500
#define smnreg_0x1C300400 0x1C300400

#define USR_CAKE_INCR 0x11000
#define USR_LINK_INCR 0x100000
#define USR_CP_INCR 0x10000

#define NUM_USR_SMN_REGS        20

static struct aqua_reg_list usr_reg_addrs[] = {
        { smnreg_0x1B311060, 4, DW_ADDR_INCR },
        { smnreg_0x1B411060, 4, DW_ADDR_INCR },
        { smnreg_0x1B511060, 4, DW_ADDR_INCR },
        { smnreg_0x1B611060, 4, DW_ADDR_INCR },
        { smnreg_0x1C307120, 2, DW_ADDR_INCR },
        { smnreg_0x1C317120, 2, DW_ADDR_INCR },
};

#define NUM_USR1_SMN_REGS       46
static struct aqua_reg_list usr1_reg_addrs[] = {
        { smnreg_0x1C320830, 6, USR_CAKE_INCR },
        { smnreg_0x1C380830, 5, USR_CAKE_INCR },
        { smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
        { smnreg_0x1C420830, 4, USR_CAKE_INCR },
        { smnreg_0x1C320100, 6, USR_CAKE_INCR },
        { smnreg_0x1C380100, 5, USR_CAKE_INCR },
        { smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
        { smnreg_0x1C420100, 4, USR_CAKE_INCR },
        { smnreg_0x1B310500, 4, USR_LINK_INCR },
        { smnreg_0x1C300400, 2, USR_CP_INCR },
};

static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
                                            void *buf, size_t max_size,
                                            int reg_state)
{
        uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
        struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
        struct amdgpu_regs_usr_v1_0 *usr_regs;
        struct amdgpu_smn_reg_data *reg_data;
        const int max_usr_instances = 4;
        struct aqua_reg_list *reg_addrs;
        int inst = 0, i, n, r, arr_size;
        void *p;

        if (!buf || !max_size)
                return -EINVAL;

        switch (reg_state) {
        case AMDGPU_REG_STATE_TYPE_USR:
                arr_size = ARRAY_SIZE(usr_reg_addrs);
                reg_addrs = usr_reg_addrs;
                num_smn = NUM_USR_SMN_REGS;
                break;
        case AMDGPU_REG_STATE_TYPE_USR_1:
                arr_size = ARRAY_SIZE(usr1_reg_addrs);
                reg_addrs = usr1_reg_addrs;
                num_smn = NUM_USR1_SMN_REGS;
                break;
        default:
                return -EINVAL;
        }

        usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;

        szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
                                                             sizeof(*usr_regs),
                                                             num_smn);
        if (max_size < szbuf)
                return -EOVERFLOW;

        p = &usr_reg_state->usr_state_regs[0];
        for_each_inst(i, adev->aid_mask) {
                usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
                usr_regs->inst_header.instance = inst++;
                usr_regs->inst_header.state = AMDGPU_INST_S_OK;
                usr_regs->inst_header.num_smn_regs = num_smn;
                reg_data = usr_regs->smn_reg_values;

                for (r = 0; r < arr_size; r++) {
                        start_addr = reg_addrs[r].start_addr;
                        incrx = reg_addrs[r].incrx;
                        num_regs = reg_addrs[r].num_regs;
                        for (n = 0; n < num_regs; n++) {
                                aqua_read_smn_ext(adev, reg_data,
                                                  start_addr + n * incrx, i);
                                reg_data++;
                        }
                }
                p = reg_data;
        }

        usr_reg_state->common_header.structure_size = szbuf;
        usr_reg_state->common_header.format_revision = 1;
        usr_reg_state->common_header.content_revision = 0;
        usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR;
        usr_reg_state->common_header.num_instances = max_usr_instances;

        return usr_reg_state->common_header.structure_size;
}

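/*
 * Top-level dispatcher for register state dumps: route the request to the
 * matching PCIE/XGMI/WAFL/USR reader above and return the number of bytes
 * written to buf, or a negative error code.
 */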
ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
                                    enum amdgpu_reg_state reg_state, void *buf,
                                    size_t max_size)
{
        ssize_t size;

        switch (reg_state) {
        case AMDGPU_REG_STATE_TYPE_PCIE:
                size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
                break;
        case AMDGPU_REG_STATE_TYPE_XGMI:
                size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
                break;
        case AMDGPU_REG_STATE_TYPE_WAFL:
                size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
                break;
        case AMDGPU_REG_STATE_TYPE_USR:
                size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
                                                    AMDGPU_REG_STATE_TYPE_USR);
                break;
        case AMDGPU_REG_STATE_TYPE_USR_1:
                size = aqua_vanjaram_read_usr_state(
                        adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
                break;
        default:
                return -EINVAL;
        }

        return size;
}