/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "df_v3_6.h"

#include "df/df_3_6_default.h"
#include "df/df_3_6_offset.h"
#include "df/df_3_6_sh_mask.h"

#define DF_3_6_SMN_REG_INST_DIST        0x8
#define DF_3_6_INST_CNT                 8

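/*
 * fb channel count for each IntLvNumChan encoding read from
 * DF_CS_UMC_AON0_DramBaseAddress0; encodings that map to 0 have no
 * channel count in this lookup.
 */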
static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0,
                                       16, 32, 0, 0, 0, 2, 4, 8};

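/*
 * The format attrs below describe how the event/instance/umask fields
 * are packed into perf_event_attr.config.  With the PMU registered by
 * amdgpu_pmu.c, userspace can then request an event, e.g. (the exact
 * PMU name comes from amdgpu_pmu.c):
 *   perf stat -e amdgpu_df/cake0_pcsout_txdata/
 */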
/* init df format attrs */
AMDGPU_PMU_ATTR(event,          "config:0-7");
AMDGPU_PMU_ATTR(instance,       "config:8-15");
AMDGPU_PMU_ATTR(umask,          "config:16-23");

/* df format attributes */
static struct attribute *df_v3_6_format_attrs[] = {
        &pmu_attr_event.attr,
        &pmu_attr_instance.attr,
        &pmu_attr_umask.attr,
        NULL
};

/* df format attribute group */
static struct attribute_group df_v3_6_format_attr_group = {
        .name = "format",
        .attrs = df_v3_6_format_attrs,
};

/* df event attrs */
AMDGPU_PMU_ATTR(cake0_pcsout_txdata,
                      "event=0x7,instance=0x46,umask=0x2");
AMDGPU_PMU_ATTR(cake1_pcsout_txdata,
                      "event=0x7,instance=0x47,umask=0x2");
AMDGPU_PMU_ATTR(cake0_pcsout_txmeta,
                      "event=0x7,instance=0x46,umask=0x4");
AMDGPU_PMU_ATTR(cake1_pcsout_txmeta,
                      "event=0x7,instance=0x47,umask=0x4");
AMDGPU_PMU_ATTR(cake0_ftiinstat_reqalloc,
                      "event=0xb,instance=0x46,umask=0x4");
AMDGPU_PMU_ATTR(cake1_ftiinstat_reqalloc,
                      "event=0xb,instance=0x47,umask=0x4");
AMDGPU_PMU_ATTR(cake0_ftiinstat_rspalloc,
                      "event=0xb,instance=0x46,umask=0x8");
AMDGPU_PMU_ATTR(cake1_ftiinstat_rspalloc,
                      "event=0xb,instance=0x47,umask=0x8");

/* df event attributes */
static struct attribute *df_v3_6_event_attrs[] = {
        &pmu_attr_cake0_pcsout_txdata.attr,
        &pmu_attr_cake1_pcsout_txdata.attr,
        &pmu_attr_cake0_pcsout_txmeta.attr,
        &pmu_attr_cake1_pcsout_txmeta.attr,
        &pmu_attr_cake0_ftiinstat_reqalloc.attr,
        &pmu_attr_cake1_ftiinstat_reqalloc.attr,
        &pmu_attr_cake0_ftiinstat_rspalloc.attr,
        &pmu_attr_cake1_ftiinstat_rspalloc.attr,
        NULL
};

/* df event attribute group */
static struct attribute_group df_v3_6_event_attr_group = {
        .name = "events",
        .attrs = df_v3_6_event_attrs
};

/* df event attr groups */
const struct attribute_group *df_v3_6_attr_groups[] = {
                &df_v3_6_format_attr_group,
                &df_v3_6_event_attr_group,
                NULL
};

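/*
 * df_v3_6_get_fica - read a 64-bit value via fabric indirect config access
 *
 * Programs the FICAA address register with ficaa_val, then reads the
 * corresponding data lo/hi registers under the pcie index/data lock.
 */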
static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
                                 uint32_t ficaa_val)
{
        unsigned long flags, address, data;
        uint32_t ficadl_val, ficadh_val;

        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
        WREG32(data, ficaa_val);

        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3);
        ficadl_val = RREG32(data);

        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
        ficadh_val = RREG32(data);

        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return (((uint64_t)ficadh_val << 32) | ficadl_val);
}

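/*
 * df_v3_6_set_fica - write a 64-bit value via fabric indirect config access
 *
 * Programs the FICAA address register with ficaa_val, then writes the
 * data lo/hi registers under the pcie index/data lock.
 */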
static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
                             uint32_t ficadl_val, uint32_t ficadh_val)
{
        unsigned long flags, address, data;

        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
        WREG32(data, ficaa_val);

        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3);
        WREG32(data, ficadl_val);

        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
        WREG32(data, ficadh_val);

        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/*
 * df_v3_6_perfmon_rreg - read perfmon lo and hi
 *
 * Must be atomic: no MMIO method is provided, so the lo and hi reads
 * have to happen back-to-back to preserve the DF finite state machine.
 */
static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
                            uint32_t lo_addr, uint32_t *lo_val,
                            uint32_t hi_addr, uint32_t *hi_val)
{
        unsigned long flags, address, data;

        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, lo_addr);
        *lo_val = RREG32(data);
        WREG32(address, hi_addr);
        *hi_val = RREG32(data);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/*
 * df_v3_6_perfmon_wreg - write perfmon lo and hi
 *
 * Must be atomic: no MMIO method is provided, and no reads may occur
 * between the data writes, to preserve the data fabric's finite state
 * machine.
 */
static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
                            uint32_t lo_val, uint32_t hi_addr, uint32_t hi_val)
{
        unsigned long flags, address, data;

        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, lo_addr);
        WREG32(data, lo_val);
        WREG32(address, hi_addr);
        WREG32(data, hi_val);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/*
 * same as df_v3_6_perfmon_wreg, but read the values back and report
 * whether the write took effect
 */
static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev,
                                          uint32_t lo_addr, uint32_t lo_val,
                                          uint32_t hi_addr, uint32_t hi_val)
{
        unsigned long flags, address, data;
        uint32_t lo_val_rb, hi_val_rb;

        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, lo_addr);
        WREG32(data, lo_val);
        WREG32(address, hi_addr);
        WREG32(data, hi_val);

        WREG32(address, lo_addr);
        lo_val_rb = RREG32(data);
        WREG32(address, hi_addr);
        hi_val_rb = RREG32(data);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        if (lo_val != lo_val_rb || hi_val != hi_val_rb)
                return -EBUSY;

        return 0;
}

/*
 * Retry arming the counters every 100 usec, up to a 1 millisecond
 * timeout.  If arming still fails after the timeout, return an error.
 */
#define ARM_RETRY_USEC_TIMEOUT  1000
#define ARM_RETRY_USEC_INTERVAL 100
static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
                                          uint32_t lo_addr, uint32_t lo_val,
                                          uint32_t hi_addr, uint32_t hi_val)
{
        int countdown = ARM_RETRY_USEC_TIMEOUT;

        while (countdown) {
                if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val,
                                                     hi_addr, hi_val))
                        break;

                countdown -= ARM_RETRY_USEC_INTERVAL;
                udelay(ARM_RETRY_USEC_INTERVAL);
        }

        return countdown > 0 ? 0 : -ETIME;
}

/* get the number of df counters available */
static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct amdgpu_device *adev;
        struct drm_device *ddev;
        int i, count;

        ddev = dev_get_drvdata(dev);
        adev = drm_to_adev(ddev);
        count = 0;

        for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
                if (adev->df_perfmon_config_assign_mask[i] == 0)
                        count++;
        }

        return snprintf(buf, PAGE_SIZE, "%i\n", count);
}

/*
 * device attr for available perfmon counters, exposed in sysfs as
 * df_cntr_avail
 */
static DEVICE_ATTR(df_cntr_avail, S_IRUGO, df_v3_6_get_df_cntr_avail, NULL);

static void df_v3_6_query_hashes(struct amdgpu_device *adev)
{
        u32 tmp;

        adev->df.hash_status.hash_64k = false;
        adev->df.hash_status.hash_2m = false;
        adev->df.hash_status.hash_1g = false;

        if (adev->asic_type != CHIP_ARCTURUS)
                return;

        /* encoding for hash-enabled on Arcturus */
        if (adev->df.funcs->get_fb_channel_number(adev) == 0xe) {
                tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DfGlobalCtrl);
                adev->df.hash_status.hash_64k = REG_GET_FIELD(tmp,
                                                DF_CS_UMC_AON0_DfGlobalCtrl,
                                                GlbHashIntlvCtl64K);
                adev->df.hash_status.hash_2m = REG_GET_FIELD(tmp,
                                                DF_CS_UMC_AON0_DfGlobalCtrl,
                                                GlbHashIntlvCtl2M);
                adev->df.hash_status.hash_1g = REG_GET_FIELD(tmp,
                                                DF_CS_UMC_AON0_DfGlobalCtrl,
                                                GlbHashIntlvCtl1G);
        }
}

/* init perfmons */
static void df_v3_6_sw_init(struct amdgpu_device *adev)
{
        int i, ret;

        ret = device_create_file(adev->dev, &dev_attr_df_cntr_avail);
        if (ret)
                DRM_ERROR("failed to create file for available df counters\n");

        for (i = 0; i < AMDGPU_MAX_DF_PERFMONS; i++)
                adev->df_perfmon_config_assign_mask[i] = 0;

        df_v3_6_query_hashes(adev);
}

static void df_v3_6_sw_fini(struct amdgpu_device *adev)
{
        device_remove_file(adev->dev, &dev_attr_df_cntr_avail);
}

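/*
 * Broadcast mode: clearing CfgRegInstAccEn makes config register writes
 * reach all DF instances at once; restoring the register default
 * re-enables per-instance access.
 */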
static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
                                          bool enable)
{
        u32 tmp;

        if (enable) {
                tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl);
                tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
                WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp);
        } else {
                WREG32_SOC15(DF, 0, mmFabricConfigAccessControl,
                             mmFabricConfigAccessControl_DEFAULT);
        }
}

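/* read the IntLvNumChan encoding from the DRAM base address register */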
static u32 df_v3_6_get_fb_channel_number(struct amdgpu_device *adev)
{
        u32 tmp;

        tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0);
        tmp &= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK;
        tmp >>= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;

        return tmp;
}

static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
{
        int fb_channel_number;

        fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
        if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number))
                fb_channel_number = 0;

        return df_v3_6_channel_number[fb_channel_number];
}

static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                     bool enable)
{
        u32 tmp;

        if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) {
                /* Put DF on broadcast mode */
                adev->df.funcs->enable_broadcast_mode(adev, true);

                if (enable) {
                        tmp = RREG32_SOC15(DF, 0,
                                        mmDF_PIE_AON0_DfGlobalClkGater);
                        tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
                        tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
                        WREG32_SOC15(DF, 0,
                                        mmDF_PIE_AON0_DfGlobalClkGater, tmp);
                } else {
                        tmp = RREG32_SOC15(DF, 0,
                                        mmDF_PIE_AON0_DfGlobalClkGater);
                        tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
                        tmp |= DF_V3_6_MGCG_DISABLE;
                        WREG32_SOC15(DF, 0,
                                        mmDF_PIE_AON0_DfGlobalClkGater, tmp);
                }

                /* Exit broadcast mode */
                adev->df.funcs->enable_broadcast_mode(adev, false);
        }
}

static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
                                          u32 *flags)
{
        u32 tmp;

        /* AMD_CG_SUPPORT_DF_MGCG */
        tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
        if (tmp & DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY)
                *flags |= AMD_CG_SUPPORT_DF_MGCG;
}

/* get the counter index assigned to a df perfmon config, or -EINVAL if none */
static int df_v3_6_pmc_config_2_cntr(struct amdgpu_device *adev,
                                      uint64_t config)
{
        int i;

        for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
                if ((config & 0x0FFFFFFUL) ==
                                        adev->df_perfmon_config_assign_mask[i])
                        return i;
        }

        return -EINVAL;
}

/* get address based on counter assignment */
static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
                                 uint64_t config,
                                 int is_ctrl,
                                 uint32_t *lo_base_addr,
                                 uint32_t *hi_base_addr)
{
        int target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);

        if (target_cntr < 0)
                return;

        switch (target_cntr) {
        case 0:
                *lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
                *hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
                break;
        case 1:
                *lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
                *hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
                break;
        case 2:
                *lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
                *hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
                break;
        case 3:
                *lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
                *hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
                break;
        }
}

/* get read counter address */
static void df_v3_6_pmc_get_read_settings(struct amdgpu_device *adev,
                                          uint64_t config,
                                          uint32_t *lo_base_addr,
                                          uint32_t *hi_base_addr)
{
        df_v3_6_pmc_get_addr(adev, config, 0, lo_base_addr, hi_base_addr);
}

/* get control counter settings i.e. address and values to set */
static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
                                          uint64_t config,
                                          uint32_t *lo_base_addr,
                                          uint32_t *hi_base_addr,
                                          uint32_t *lo_val,
                                          uint32_t *hi_val,
                                          bool is_enable)
{
        uint32_t eventsel, instance, unitmask;
        uint32_t instance_10, instance_5432, instance_76;

        df_v3_6_pmc_get_addr(adev, config, 1, lo_base_addr, hi_base_addr);

        if ((*lo_base_addr == 0) || (*hi_base_addr == 0)) {
                DRM_ERROR("[DF PMC] addressing not retrieved! Lo: %x, Hi: %x",
                                *lo_base_addr, *hi_base_addr);
                return -ENXIO;
        }

        eventsel = DF_V3_6_GET_EVENT(config) & 0x3f;
        unitmask = DF_V3_6_GET_UNITMASK(config) & 0xf;
        instance = DF_V3_6_GET_INSTANCE(config);

        instance_10 = instance & 0x3;
        instance_5432 = (instance >> 2) & 0xf;
        instance_76 = (instance >> 6) & 0x3;

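        /*
         * PerfMonCtl layout: lo holds eventsel in bits 5:0, instance
         * bits 1:0 in 7:6, the unit mask in 11:8 and the enable bit at
         * 22; hi holds instance bits 5:2 in 3:0 and bits 7:6 in 30:29.
         */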
        *lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel;
        *lo_val = is_enable ? *lo_val | (1 << 22) : *lo_val & ~(1 << 22);
        *hi_val = (instance_76 << 29) | instance_5432;

        DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
                config, *lo_base_addr, *hi_base_addr, *lo_val, *hi_val);

        return 0;
}

/* add df performance counters for read */
static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
                                   uint64_t config)
{
        int i, target_cntr;

        target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);

        if (target_cntr >= 0)
                return 0;

        for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
                if (adev->df_perfmon_config_assign_mask[i] == 0U) {
                        adev->df_perfmon_config_assign_mask[i] =
                                                        config & 0x0FFFFFFUL;
                        return 0;
                }
        }

        return -ENOSPC;
}

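/*
 * Bit 31 of the assign mask marks a counter whose arming failed in
 * pmc_start; it is re-armed from pmc_get_count before the next read.
 */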
#define DEFERRED_ARM_MASK       (1 << 31)
static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
                                    uint64_t config, bool is_deferred)
{
        int target_cntr;

        target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);

        if (target_cntr < 0)
                return -EINVAL;

        if (is_deferred)
                adev->df_perfmon_config_assign_mask[target_cntr] |=
                                                        DEFERRED_ARM_MASK;
        else
                adev->df_perfmon_config_assign_mask[target_cntr] &=
                                                        ~DEFERRED_ARM_MASK;

        return 0;
}

static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
                                    uint64_t config)
{
        int target_cntr;

        target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);

        /*
         * we never get target_cntr < 0 since this function is only called
         * from pmc_get_count for now, but check anyway.
         */
        return (target_cntr >= 0 &&
                        (adev->df_perfmon_config_assign_mask[target_cntr]
                        & DEFERRED_ARM_MASK));
}

/* release performance counter */
static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
                                     uint64_t config)
{
        int target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);

        if (target_cntr >= 0)
                adev->df_perfmon_config_assign_mask[target_cntr] = 0ULL;
}

static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev,
                                         uint64_t config)
{
        uint32_t lo_base_addr = 0, hi_base_addr = 0;

        df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
                                      &hi_base_addr);

        if ((lo_base_addr == 0) || (hi_base_addr == 0))
                return;

        df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
}

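/*
 * df_v3_6_pmc_start - add or start a df perfmon counter
 *
 * With is_add set, only assigns the config to a free counter slot.
 * Otherwise resets the counter, programs the control registers and arms
 * the counter with retry; if arming still fails, the counter is marked
 * deferred and re-armed on the next pmc_get_count.
 */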
static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
                             int is_add)
{
        uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
        int err = 0, ret = 0;

        switch (adev->asic_type) {
        case CHIP_VEGA20:
                if (is_add)
                        return df_v3_6_pmc_add_cntr(adev, config);

                df_v3_6_reset_perfmon_cntr(adev, config);

                ret = df_v3_6_pmc_get_ctrl_settings(adev,
                                        config,
                                        &lo_base_addr,
                                        &hi_base_addr,
                                        &lo_val,
                                        &hi_val,
                                        true);

                if (ret)
                        return ret;

                err = df_v3_6_perfmon_arm_with_retry(adev,
                                                     lo_base_addr,
                                                     lo_val,
                                                     hi_base_addr,
                                                     hi_val);

                if (err)
                        ret = df_v3_6_pmc_set_deferred(adev, config, true);

                break;
        default:
                break;
        }

        return ret;
}

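/*
 * df_v3_6_pmc_stop - stop a df perfmon counter
 *
 * Rewrites the control registers with the enable bit clear; with
 * is_remove set, also resets the counter and releases its slot.
 */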
static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
                            int is_remove)
{
        uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
        int ret = 0;

        switch (adev->asic_type) {
        case CHIP_VEGA20:
                ret = df_v3_6_pmc_get_ctrl_settings(adev,
                        config,
                        &lo_base_addr,
                        &hi_base_addr,
                        &lo_val,
                        &hi_val,
                        false);

                if (ret)
                        return ret;

                /* write back the control values with the enable bit clear */
                df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val,
                                     hi_base_addr, hi_val);

                if (is_remove) {
                        df_v3_6_reset_perfmon_cntr(adev, config);
                        df_v3_6_pmc_release_cntr(adev, config);
                }

                break;
        default:
                break;
        }

        return ret;
}

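/*
 * df_v3_6_pmc_get_count - read a df perfmon counter value
 *
 * Re-arms a deferred counter first (discarding the sample if that
 * fails), then reads lo/hi atomically and combines them; counts at or
 * past the overflow threshold are reported as 0.
 */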
static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
                                  uint64_t config,
                                  uint64_t *count)
{
        uint32_t lo_base_addr = 0, hi_base_addr = 0, lo_val = 0, hi_val = 0;
        *count = 0;

        switch (adev->asic_type) {
        case CHIP_VEGA20:
                df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
                                      &hi_base_addr);

                if ((lo_base_addr == 0) || (hi_base_addr == 0))
                        return;

                /* rearm the counter or throw away count value on failure */
                if (df_v3_6_pmc_is_deferred(adev, config)) {
                        int rearm_err = df_v3_6_perfmon_arm_with_status(adev,
                                                        lo_base_addr, lo_val,
                                                        hi_base_addr, hi_val);

                        if (rearm_err)
                                return;

                        df_v3_6_pmc_set_deferred(adev, config, false);
                }

                df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
                                hi_base_addr, &hi_val);

                *count = ((uint64_t)hi_val << 32) | lo_val;

                if (*count >= DF_V3_6_PERFMON_OVERFLOW)
                        *count = 0;

                DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
                         config, lo_base_addr, hi_base_addr, lo_val, hi_val);

                break;
        default:
                break;
        }
}

693
694 const struct amdgpu_df_funcs df_v3_6_funcs = {
695         .sw_init = df_v3_6_sw_init,
696         .sw_fini = df_v3_6_sw_fini,
697         .enable_broadcast_mode = df_v3_6_enable_broadcast_mode,
698         .get_fb_channel_number = df_v3_6_get_fb_channel_number,
699         .get_hbm_channel_number = df_v3_6_get_hbm_channel_number,
700         .update_medium_grain_clock_gating =
701                         df_v3_6_update_medium_grain_clock_gating,
702         .get_clockgating_state = df_v3_6_get_clockgating_state,
703         .pmc_start = df_v3_6_pmc_start,
704         .pmc_stop = df_v3_6_pmc_stop,
705         .pmc_get_count = df_v3_6_pmc_get_count,
706         .get_fica = df_v3_6_get_fica,
707         .set_fica = df_v3_6_set_fica,
708 };