2 * Copyright 2018 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include "df/df_3_6_default.h"
27 #include "df/df_3_6_offset.h"
28 #include "df/df_3_6_sh_mask.h"
/*
 * Lookup table: maps the DramBaseAddress0 IntLvNumChan field encoding to
 * the number of DRAM channels it represents. Zero entries appear to be
 * reserved/invalid encodings — NOTE(review): confirm against the DF 3.6
 * register specification.
 */
30 static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0,
31 				       16, 32, 0, 0, 0, 2, 4, 8};
/*
 * Per-ASIC DF init hook, installed in df_v3_6_funcs below.
 * NOTE(review): the body is elided in this chunk — confirm contents
 * against the full file.
 */
33 static void df_v3_6_init(struct amdgpu_device *adev)
/*
 * Switch DF fabric config register accesses between broadcast (all
 * instances) and per-instance mode.
 * NOTE(review): the 'enable' parameter declaration and the if/else
 * framing are elided in this chunk (original lines 38-42, 46 missing).
 */
37 static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
/* Enable path: clear CfgRegInstAccEn so config accesses broadcast. */
43 	tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl);
44 	tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
45 	WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp);
/* Disable path: restore the register to its hardware default value. */
47 	WREG32_SOC15(DF, 0, mmFabricConfigAccessControl,
48 			mmFabricConfigAccessControl_DEFAULT);
/*
 * Read the raw IntLvNumChan (interleave channel) field from
 * DramBaseAddress0. The returned value is an encoding, not a channel
 * count — it is decoded via df_v3_6_channel_number[] by the caller.
 * NOTE(review): the trailing 'return tmp;' is elided in this chunk.
 */
51 static u32 df_v3_6_get_fb_channel_number(struct amdgpu_device *adev)
55 	tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0);
/* Mask then shift extracts just the IntLvNumChan bit-field. */
56 	tmp &= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK;
57 	tmp >>= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
/*
 * Translate the raw framebuffer-channel encoding into an actual HBM
 * channel count via the df_v3_6_channel_number[] lookup table.
 */
62 static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
64 	int fb_channel_number;
66 	fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
/* Clamp out-of-range encodings to index 0 to avoid OOB table access. */
67 	if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number))
68 		fb_channel_number = 0;
70 	return df_v3_6_channel_number[fb_channel_number];
/*
 * Enable/disable DF medium-grain clock gating (MGCG) by programming the
 * MGCGMode field of DfGlobalClkGater. Broadcast mode is entered first so
 * the write reaches all DF instances.
 * NOTE(review): the 'bool enable' parameter and the '} else {' between
 * the two register sequences are elided in this chunk (lines 74-77, 86).
 */
73 static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
78 	/* Put DF on broadcast mode */
79 	adev->df_funcs->enable_broadcast_mode(adev, true);
81 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
/* Enable: read-modify-write MGCGMode to the 15-cycle-delay setting. */
82 		tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
83 		tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
84 		tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
85 		WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
/* Disable: same read-modify-write, but with the MGCG-disable value. */
87 		tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
88 		tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
89 		tmp |= DF_V3_6_MGCG_DISABLE;
90 		WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
93 	/* Exit broadcast mode */
94 	adev->df_funcs->enable_broadcast_mode(adev, false);
/*
 * Report current DF clock-gating state: sets AMD_CG_SUPPORT_DF_MGCG in
 * *flags if the 15-cycle-delay MGCG mode is currently programmed.
 * NOTE(review): the 'u32 *flags' parameter line is elided in this chunk.
 */
97 static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
102 	/* AMD_CG_SUPPORT_DF_MGCG */
103 	tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
104 	if (tmp & DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY)
105 		*flags |= AMD_CG_SUPPORT_DF_MGCG;
108 /* hold counter assignment per gpu struct */
/*
 * Per-device table of DF perfmon counter assignments. The amdgpu_device
 * is embedded BY VALUE so that container_of(adev, ..., gpu) in the pmc
 * helpers below can recover the owning mask from an adev pointer —
 * NOTE(review): this only works if every adev passed in actually lives
 * inside a df_v3_6_event_mask; confirm where this struct is allocated.
 * Closing '};' is elided in this chunk.
 */
109 struct df_v3_6_event_mask {
110 	struct amdgpu_device gpu;
/* Slot i holds the low 24 bits of the config assigned to counter i;
 * 0 means the slot is free (see df_v3_6_pmc_assign_cntr). */
111 	uint64_t config_assign_mask[AMDGPU_DF_MAX_COUNTERS];
114 /* get assigned df perfmon ctr as int */
/*
 * Look up which counter slot (if any) the given config is assigned to.
 * Only the low 24 bits of config participate in the match.
 * NOTE(review): the output-parameter declaration and the body that
 * stores the found index are elided in this chunk.
 */
115 static void df_v3_6_pmc_config_2_cntr(struct amdgpu_device *adev,
119 	struct df_v3_6_event_mask *mask;
/* Recover the per-device assignment table from the embedded adev. */
122 	mask = container_of(adev, struct df_v3_6_event_mask, gpu);
124 	for (i = 0; i < AMDGPU_DF_MAX_COUNTERS; i++) {
125 		if ((config & 0x0FFFFFFUL) == mask->config_assign_mask[i]) {
132 /* get address based on counter assignment */
/*
 * Resolve the SMN register pair (lo/hi) for the counter assigned to
 * 'config'. is_ctrl selects the control (PerfMonCtl*) vs. the count
 * (PerfMonCtr*) registers.
 * NOTE(review): the 'config'/'is_ctrl' parameter lines, the case labels
 * (presumably 0-3), and the default branch are elided in this chunk.
 */
133 static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
136 				 uint32_t *lo_base_addr,
137 				 uint32_t *hi_base_addr)
140 	int target_cntr = -1;
142 	df_v3_6_pmc_config_2_cntr(adev, config, &target_cntr);
/* One Ctl/Ctr lo+hi register pair per hardware counter slot. */
147 	switch (target_cntr) {
150 		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
151 		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
154 		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
155 		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
158 		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
159 		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
162 		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
163 		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
170 /* get read counter address */
/*
 * Convenience wrapper: resolve the COUNT (read) register pair for
 * 'config' by calling df_v3_6_pmc_get_addr with is_ctrl = 0.
 */
171 static void df_v3_6_pmc_get_read_settings(struct amdgpu_device *adev,
173 					  uint32_t *lo_base_addr,
174 					  uint32_t *hi_base_addr)
176 	df_v3_6_pmc_get_addr(adev, config, 0, lo_base_addr, hi_base_addr);
179 /* get control counter settings i.e. address and values to set */
/*
 * Resolve the CONTROL register pair for 'config' and compute the lo/hi
 * values that encode its event select, instance and unit mask into the
 * hardware's split bit-field layout.
 * NOTE(review): parameter lines for config/lo_val/hi_val and several
 * statements (including the es_13_6 derivation at original line 207 and
 * the early returns) are elided in this chunk.
 */
180 static void df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
182 					  uint32_t *lo_base_addr,
183 					  uint32_t *hi_base_addr,
188 	uint32_t eventsel, instance, unitmask;
/* es_X_Y = bits X..Y of the (reassembled) 14-bit event select. */
189 	uint32_t es_5_0, es_13_0, es_13_6, es_13_12, es_11_8, es_7_0;
191 	df_v3_6_pmc_get_addr(adev, config, 1, lo_base_addr, hi_base_addr);
/* Callers that only need addresses may pass NULL for the values. */
193 	if (lo_val == NULL || hi_val == NULL)
/* Address 0 means the config has no assigned counter slot. */
196 	if ((*lo_base_addr == 0) || (*hi_base_addr == 0)) {
197 		DRM_ERROR("DF PMC addressing not retrieved! Lo: %x, Hi: %x",
198 			  *lo_base_addr, *hi_base_addr);
/* Unpack the user-visible config into its raw fields. */
202 	eventsel = GET_EVENT(config);
203 	instance = GET_INSTANCE(config);
204 	unitmask = GET_UNITMASK(config);
206 	es_5_0 = eventsel & 0x3FUL;
/* Reassemble the full event select, then re-slice it into the split
 * fields the hardware expects: bits 7:0 + unitmask go in the lo
 * register, bits 11:8 and 13:12 go in the hi register. */
208 	es_13_0 = (es_13_6 << 6) + es_5_0;
209 	es_13_12 = (es_13_0 & 0x03000UL) >> 12;
210 	es_11_8 = (es_13_0 & 0x0F00UL) >> 8;
211 	es_7_0 = es_13_0 & 0x0FFUL;
212 	*lo_val = (es_7_0 & 0xFFUL) | ((unitmask & 0x0FUL) << 8);
213 	*hi_val = (es_11_8 | ((es_13_12)<<(29)));
216 /* assign df performance counters for read */
/*
 * Claim a free counter slot for 'config' (low 24 bits). If the config is
 * already assigned, the existing slot is reused.
 * NOTE(review): parameter lines (config, and apparently an is_assigned
 * out-parameter per the caller at add_xgmi_link_cntr) plus the
 * early-return/break/return statements are elided in this chunk.
 */
217 static int df_v3_6_pmc_assign_cntr(struct amdgpu_device *adev,
222 	struct df_v3_6_event_mask *mask;
229 	df_v3_6_pmc_config_2_cntr(adev, config, &target_cntr);
/* Already assigned: nothing to allocate. */
231 	if (target_cntr >= 0) {
236 	mask = container_of(adev, struct df_v3_6_event_mask, gpu);
/* First slot whose mask is 0 is free — record the config there. */
238 	for (i = 0; i < AMDGPU_DF_MAX_COUNTERS; i++) {
239 		if (mask->config_assign_mask[i] == 0ULL) {
240 			mask->config_assign_mask[i] = config & 0x0FFFFFFUL;
248 /* release performance counter */
/*
 * Free the counter slot assigned to 'config' by zeroing its entry in
 * the per-device assignment table (0 marks a slot as free).
 * NOTE(review): the 'config' parameter line is elided in this chunk.
 */
249 static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
253 	struct df_v3_6_event_mask *mask;
258 	df_v3_6_pmc_config_2_cntr(adev, config, &target_cntr);
260 	mask = container_of(adev, struct df_v3_6_event_mask, gpu);
262 	if (target_cntr >= 0)
263 		mask->config_assign_mask[target_cntr] = 0ULL;
268 * get xgmi link counters via programmable data fabric (df) counters (max 4)
269 * using the cake tx event.
271 * @adev: amdgpu device
272 * @instance: cake link index (currently 2 links to poll on vega20)
273 * @count: output; 64-bit counter value assembled from the lo/hi registers
277 static void df_v3_6_get_xgmi_link_cntr(struct amdgpu_device *adev,
281 	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
284 	config = GET_INSTANCE_CONFIG(instance);
286 	df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
/* Address 0 means the config has no assigned counter — bail out. */
289 	if ((lo_base_addr == 0) || (hi_base_addr == 0))
292 	lo_val = RREG32_PCIE(lo_base_addr);
293 	hi_val = RREG32_PCIE(hi_base_addr);
/* '| 0ULL' promotes to 64-bit before the shift, avoiding UB on <<32. */
295 	*count = ((hi_val | 0ULL) << 32) | (lo_val | 0ULL);
299 * reset xgmi link counters
301 * @adev: amdgpu device
302 * @instance: cake link index (currently 2 links to poll on vega20)
305 static void df_v3_6_reset_xgmi_link_cntr(struct amdgpu_device *adev,
308 	uint32_t lo_base_addr, hi_base_addr;
/* Hand-built config: unitmask 0x7, event 0x46+instance, instance field
 * 0x2 — NOTE(review): appears equivalent to GET_INSTANCE_CONFIG(instance)
 * used elsewhere; confirm and consider unifying. */
311 	config = 0ULL | (0x7ULL) | ((0x46ULL + instance) << 8) | (0x2 << 16);
313 	df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
316 	if ((lo_base_addr == 0) || (hi_base_addr == 0))
/* Zero both halves of the 64-bit count register pair. */
319 	WREG32_PCIE(lo_base_addr, 0UL);
320 	WREG32_PCIE(hi_base_addr, 0UL);
324 * add xgmi link counters
326 * @adev: amdgpu device
327 * @instance: cake link index (currently 2 links to poll on vega20)
331 static int df_v3_6_add_xgmi_link_cntr(struct amdgpu_device *adev,
334 	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
336 	int ret, is_assigned;
/* Only two cake links exist; reject anything else. */
338 	if (instance < 0 || instance > 1)
341 	config = GET_INSTANCE_CONFIG(instance);
/* Claim a counter slot; skip programming if it was already assigned. */
343 	ret = df_v3_6_pmc_assign_cntr(adev, config, &is_assigned);
345 	if (ret || is_assigned)
/* Fetch the control-register pair and the values to program into it —
 * NOTE(review): argument lines 349-353 are elided in this chunk. */
348 	df_v3_6_pmc_get_ctrl_settings(adev,
355 	WREG32_PCIE(lo_base_addr, lo_val);
356 	WREG32_PCIE(hi_base_addr, hi_val);
363 * start xgmi link counters
365 * @adev: amdgpu device
366 * @instance: cake link index (currently 2 links to poll on vega20)
367 * @is_enable: either resume or assign event via df perfmon
371 static int df_v3_6_start_xgmi_link_cntr(struct amdgpu_device *adev,
375 	uint32_t lo_base_addr, hi_base_addr, lo_val;
379 	if (instance < 0 || instance > 1)
/* Assign-path: allocate and program the counter for this link —
 * NOTE(review): the is_enable branch structure is elided in this chunk. */
384 		ret = df_v3_6_add_xgmi_link_cntr(adev, instance);
391 		config = GET_INSTANCE_CONFIG(instance);
393 		df_v3_6_pmc_get_ctrl_settings(adev,
400 		if (lo_base_addr == 0)
/* Set bit 22 of the control lo register — presumably the counter
 * enable/resume bit; NOTE(review): confirm against DF PMC register spec.
 * The 1ULL<<22 fits in 32 bits, so the 32-bit register write is safe. */
403 		lo_val = RREG32_PCIE(lo_base_addr);
405 		WREG32_PCIE(lo_base_addr, lo_val | (1ULL << 22));
415 * stop xgmi link counters
417 * @adev: amdgpu device
418 * @instance: cake link index (currently 2 links to poll on vega20)
419 * @is_enable: either pause or unassign event via df perfmon
423 static int df_v3_6_stop_xgmi_link_cntr(struct amdgpu_device *adev,
428 	uint32_t lo_base_addr, hi_base_addr, lo_val;
431 	config = GET_INSTANCE_CONFIG(instance);
/* Unassign-path: zero the hardware count and free the counter slot —
 * NOTE(review): the is_enable branch structure is elided in this chunk. */
434 		df_v3_6_reset_xgmi_link_cntr(adev, instance);
435 		df_v3_6_pmc_release_cntr(adev, config);
438 		df_v3_6_pmc_get_ctrl_settings(adev,
445 		if ((lo_base_addr == 0) || (hi_base_addr == 0))
/* Pause-path: clear bit 22 (the enable bit set in start) to pause. */
448 		lo_val = RREG32_PCIE(lo_base_addr);
450 		WREG32_PCIE(lo_base_addr, lo_val & ~(1ULL << 22));
/*
 * df_funcs::pmc_start — dispatch a perfmon-start request by ASIC type.
 * Maps the config to XGMI TX link 0 or 1 (or -1 for neither) and starts
 * that link's counter.
 * NOTE(review): the case label (presumably CHIP_VEGA20), the trailing
 * is_enable argument, default branch and return are elided in this chunk.
 */
456 static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
459 	int xgmi_tx_link, ret = 0;
461 	switch (adev->asic_type) {
463 		xgmi_tx_link = IS_DF_XGMI_0_TX(config) ? 0
464 				: (IS_DF_XGMI_1_TX(config) ? 1 : -1);
/* Configs that are not an XGMI TX event fall through with ret = 0. */
466 		if (xgmi_tx_link >= 0)
467 			ret = df_v3_6_start_xgmi_link_cntr(adev, xgmi_tx_link,
/*
 * df_funcs::pmc_stop — mirror of pmc_start: map the config to an XGMI
 * TX link and stop/release that link's counter.
 * NOTE(review): case label, remaining call arguments, default branch
 * and return are elided in this chunk.
 */
482 static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
485 	int xgmi_tx_link, ret = 0;
487 	switch (adev->asic_type) {
489 		xgmi_tx_link = IS_DF_XGMI_0_TX(config) ? 0
490 			: (IS_DF_XGMI_1_TX(config) ? 1 : -1);
492 		if (xgmi_tx_link >= 0) {
493 			ret = df_v3_6_stop_xgmi_link_cntr(adev,
/*
 * df_funcs::pmc_get_count — read the current counter value for 'config'
 * into *count (XGMI TX links only, by ASIC type).
 * NOTE(review): as visible here the counter is RESET (original line 522)
 * immediately BEFORE it is read (line 523), which would return a
 * near-zero count each call — confirm whether intervening lines are
 * merely elided or whether the intended order is read-then-reset.
 */
509 static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
516 	switch (adev->asic_type) {
518 		xgmi_tx_link = IS_DF_XGMI_0_TX(config) ? 0
519 				: (IS_DF_XGMI_1_TX(config) ? 1 : -1);
521 		if (xgmi_tx_link >= 0) {
522 			df_v3_6_reset_xgmi_link_cntr(adev, xgmi_tx_link);
523 			df_v3_6_get_xgmi_link_cntr(adev, xgmi_tx_link, count);
/*
 * DF 3.6 implementation of the amdgpu_df_funcs vtable, consumed through
 * adev->df_funcs by the rest of the driver (and by this file itself).
 * NOTE(review): the closing '};' lies beyond this chunk.
 */
533 const struct amdgpu_df_funcs df_v3_6_funcs = {
534 	.init = df_v3_6_init,
535 	.enable_broadcast_mode = df_v3_6_enable_broadcast_mode,
536 	.get_fb_channel_number = df_v3_6_get_fb_channel_number,
537 	.get_hbm_channel_number = df_v3_6_get_hbm_channel_number,
538 	.update_medium_grain_clock_gating =
539 			df_v3_6_update_medium_grain_clock_gating,
540 	.get_clockgating_state = df_v3_6_get_clockgating_state,
541 	.pmc_start = df_v3_6_pmc_start,
542 	.pmc_stop = df_v3_6_pmc_stop,
543 	.pmc_get_count = df_v3_6_pmc_get_count