// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_pc.h"

#include <linux/delay.h>

#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_actions_slpc_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_idle.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_types.h"
#include "xe_guc_ct.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
#define MCHBAR_MIRROR_BASE_SNB	0x140000

#define RP_STATE_CAP		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define   RP0_MASK		REG_GENMASK(7, 0)
#define   RP1_MASK		REG_GENMASK(15, 8)
#define   RPN_MASK		REG_GENMASK(23, 16)

#define FREQ_INFO_REC		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
#define   RPE_MASK		REG_GENMASK(15, 8)

#define GT_PERF_STATUS		XE_REG(0x1381b4)
#define   CAGF_MASK		REG_GENMASK(19, 11)

#define GT_FREQUENCY_MULTIPLIER	50
#define GT_FREQUENCY_SCALER	3
/**
 * DOC: GuC Power Conservation (PC)
 *
 * GuC Power Conservation (PC) supports multiple features for the most
 * efficient and performing use of the GT when GuC submission is enabled,
 * including frequency management, Render-C states management, and various
 * algorithms for power balancing.
 *
 * Single Loop Power Conservation (SLPC) is the name given to the suite of
 * connected power conservation features in the GuC firmware. The firmware
 * exposes a programming interface to the host for the control of SLPC.
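 *
 * The host drives SLPC by sending H2G actions over the GuC CT channel and
 * reads results back from a shared data buffer; see the pc_action_*()
 * helpers and the slpc_shared_data accessors below.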
 *
 * Frequency management:
 * =====================
 *
 * Xe driver enables SLPC with all of its default features and frequency
 * selection, which varies per platform.
 *
 * Render-C States:
 * ================
 *
 * Render-C state management is also a GuC PC feature, now enabled in Xe for
 * all platforms.
 */
static struct xe_guc *
pc_to_guc(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_guc, pc);
}

static struct xe_device *
pc_to_xe(struct xe_guc_pc *pc)
{
	struct xe_guc *guc = pc_to_guc(pc);
	struct xe_gt *gt = container_of(guc, struct xe_gt, uc.guc);

	return gt_to_xe(gt);
}

static struct xe_gt *
pc_to_gt(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_gt, uc.guc.pc);
}

static struct iosys_map *
pc_to_maps(struct xe_guc_pc *pc)
{
	return &pc->bo->vmap;
}
#define slpc_shared_data_read(pc_, field_) \
	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_)

#define slpc_shared_data_write(pc_, field_, val_) \
	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_, val_)

#define SLPC_EVENT(id, count) \
	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
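
/*
 * SLPC_EVENT() packs an event id and its argument count into the second
 * dword of an H2G SLPC request. SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
 * for instance, announces a parameter-set event whose two arguments (the
 * parameter id and its value) follow in the action payload.
 */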
static int wait_for_pc_state(struct xe_guc_pc *pc,
			     enum slpc_global_state state)
{
	int timeout_us = 5000; /* roughly 5ms, but no need for precision */
	int slept, wait = 10;

	xe_device_assert_mem_access(pc_to_xe(pc));

	for (slept = 0; slept < timeout_us;) {
		if (slpc_shared_data_read(pc, header.global_state) == state)
			return 0;

		usleep_range(wait, wait << 1);
		slept += wait;
		wait <<= 1;
		if (slept + wait > timeout_us)
			wait = timeout_us - slept;
	}

	return -ETIMEDOUT;
}
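
/*
 * The pc_action_*() helpers below each build a GuC H2G action payload
 * (GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST, or XE_GUC_ACTION_SETUP_PC_GUCRC
 * for RC control) and send it over the CT channel, logging any failure.
 */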
static int pc_action_reset(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC PC reset: %pe", ERR_PTR(ret));

	return ret;
}
static int pc_action_shutdown(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_SHUTDOWN, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC PC shutdown %pe",
			ERR_PTR(ret));

	return ret;
}
static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	/* Blocking here to ensure the results are ready before reading them */
	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
	if (ret)
		drm_err(&pc_to_xe(pc)->drm,
			"GuC PC query task state failed: %pe", ERR_PTR(ret));

	return ret;
}
static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC PC set param failed: %pe",
			ERR_PTR(ret));

	return ret;
}
static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	u32 action[] = {
		XE_GUC_ACTION_SETUP_PC_GUCRC,
		mode,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC RC enable failed: %pe",
			ERR_PTR(ret));

	return ret;
}
static u32 decode_freq(u32 raw)
{
	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
				 GT_FREQUENCY_SCALER);
}

static u32 encode_freq(u32 freq)
{
	return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
				 GT_FREQUENCY_MULTIPLIER);
}
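
/*
 * encode_freq() and decode_freq() are inverses up to rounding:
 * encode_freq(1000) = DIV_ROUND_CLOSEST(1000 * 3, 50) = 60, and
 * decode_freq(60) = DIV_ROUND_CLOSEST(60 * 50, 3) = 1000 MHz again.
 */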
static u32 pc_get_min_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}
static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;

	/* Allow/Disallow punit to process software freq requests */
	xe_mmio_write32(gt, RP_CONTROL, state);
}
static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 rpnswreq;

	pc_set_manual_rp_ctrl(pc, true);

	/* Req freq is in units of 16.66 MHz */
	rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
	xe_mmio_write32(gt, RPNSWREQ, rpnswreq);

	/* Sleep for a small time to allow pcode to respond */
	usleep_range(100, 300);

	pc_set_manual_rp_ctrl(pc, false);
}
static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	/*
	 * GuC policy is to elevate minimum frequency to the efficient levels.
	 * Our goal is to have the admin choices respected.
	 */
	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
			    freq < pc->rpe_freq);

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				   freq);
}
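
/*
 * Note: SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY is set above exactly when the
 * requested min falls below RPe; otherwise GuC would silently elevate the
 * effective minimum to the efficient frequency, overriding the admin choice.
 */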
static int pc_get_max_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}
static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 * Also, overclocking is not supported.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				   freq);
}
static void mtl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(gt, MTL_MPE_FREQUENCY);
	else
		reg = xe_mmio_read32(gt, MTL_GT_RPE_FREQUENCY);

	pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
}
static void tgl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	/*
	 * For PVC we still need to use fused RP1 as the approximation for RPe.
	 * For all other platforms, we get the resolved RPe directly from
	 * PCODE at a different register.
	 */
	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, FREQ_INFO_REC);

	pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
static void pc_update_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_update_rpe_value(pc);
	else
		tgl_update_rpe_value(pc);

	/*
	 * RPe is decided at runtime by PCODE. In the rare case where that's
	 * smaller than the fused min, we will trust the PCODE and use that
	 * as our minimum one.
	 */
	pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
}
/**
 * xe_guc_pc_get_act_freq - Get Actual running frequency
 * @pc: The GuC PC
 *
 * Returns: The actual running frequency, which might be 0 if the GT is in a
 * Render-C sleep state (RC6).
 */
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 freq;

	xe_device_mem_access_get(gt_to_xe(gt));

	/* When in RC6, actual frequency reported will be 0. */
	if (GRAPHICS_VERx100(xe) >= 1270) {
		freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
	} else {
		freq = xe_mmio_read32(gt, GT_PERF_STATUS);
		freq = REG_FIELD_GET(CAGF_MASK, freq);
	}

	freq = decode_freq(freq);

	xe_device_mem_access_put(gt_to_xe(gt));

	return freq;
}
/**
 * xe_guc_pc_get_cur_freq - Get Current requested frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	int ret;

	xe_device_mem_access_get(gt_to_xe(gt));
	/*
	 * GuC SLPC plays with cur freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
	 */
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out;

	*freq = xe_mmio_read32(gt, RPNSWREQ);

	*freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq);
	*freq = decode_freq(*freq);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));

out:
	xe_device_mem_access_put(gt_to_xe(gt));
	return ret;
}
/**
 * xe_guc_pc_get_rp0_freq - Get the RP0 freq
 * @pc: The GuC PC
 *
 * Returns: RP0 freq.
 */
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
{
	return pc->rp0_freq;
}
/**
 * xe_guc_pc_get_rpe_freq - Get the RPe freq
 * @pc: The GuC PC
 *
 * Returns: RPe freq.
 */
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	xe_device_mem_access_get(xe);
	pc_update_rp_values(pc);
	xe_device_mem_access_put(xe);

	return pc->rpe_freq;
}
/**
 * xe_guc_pc_get_rpn_freq - Get the RPn freq
 * @pc: The GuC PC
 *
 * Returns: RPn freq.
 */
u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
{
	return pc->rpn_freq;
}
/**
 * xe_guc_pc_get_min_freq - Get the min operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	int ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	/*
	 * GuC SLPC plays with min freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
	 */
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out;

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto fw;

	*freq = pc_get_min_freq(pc);

fw:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out:
	mutex_unlock(&pc->freq_lock);
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}
/**
 * xe_guc_pc_set_min_freq - Set the minimal operational frequency
 * @pc: The GuC PC
 * @freq: The selected minimal frequency
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_min_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_min = freq;

out:
	mutex_unlock(&pc->freq_lock);
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}
/**
 * xe_guc_pc_get_max_freq - Get Maximum operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	*freq = pc_get_max_freq(pc);

out:
	mutex_unlock(&pc->freq_lock);
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}
/**
 * xe_guc_pc_set_max_freq - Set the maximum operational frequency
 * @pc: The GuC PC
 * @freq: The selected maximum frequency value
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_max_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_max = freq;

out:
	mutex_unlock(&pc->freq_lock);
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}
/**
 * xe_guc_pc_c_status - get the current GT C state
 * @pc: XE_GuC_PC instance
 *
 * Return: The current GT C state (C0, C6, or unknown).
 */
enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg, gt_c_state;

	xe_device_mem_access_get(gt_to_xe(gt));

	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		reg = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
	} else {
		reg = xe_mmio_read32(gt, GT_CORE_STATUS);
		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
	}

	xe_device_mem_access_put(gt_to_xe(gt));

	switch (gt_c_state) {
	case GT_C6:
		return GT_IDLE_C6;
	case GT_C0:
		return GT_IDLE_C0;
	default:
		return GT_IDLE_UNKNOWN;
	}
}
/**
 * xe_guc_pc_rc6_residency - rc6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Return: The RC6 residency counter value.
 */
u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u64 reg;

	xe_device_mem_access_get(gt_to_xe(gt));
	reg = xe_mmio_read32(gt, GT_GFX_RC6);
	xe_device_mem_access_put(gt_to_xe(gt));

	return reg;
}
/**
 * xe_guc_pc_mc6_residency - mc6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Return: The MC6 residency counter value.
 */
u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u64 reg;

	xe_device_mem_access_get(gt_to_xe(gt));
	reg = xe_mmio_read32(gt, MTL_MEDIA_MC6);
	xe_device_mem_access_put(gt_to_xe(gt));

	return reg;
}
static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(gt, MTL_MEDIAP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, MTL_RP_STATE_CAP);

	pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));

	pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
}
static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, RP_STATE_CAP);
	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_init_fused_rp_values(pc);
	else
		tgl_init_fused_rp_values(pc);
}
/**
 * xe_guc_pc_init_early - Initialize RPx values and request a higher GT
 * frequency to allow faster GuC load times
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_init_early(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_init_fused_rp_values(pc);
	pc_set_cur_freq(pc, pc->rp0_freq);
}
static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	ret = pc_action_query_task_state(pc);
	if (ret)
		return ret;

	/*
	 * GuC defaults to some RPmax that is not actually achievable without
	 * overclocking. Let's adjust it to the Hardware RP0, which is the
	 * regular maximum.
	 */
	if (pc_get_max_freq(pc) > pc->rp0_freq)
		pc_set_max_freq(pc, pc->rp0_freq);

	/*
	 * Same thing happens for Server platforms where min is listed as
	 * RPMax.
	 */
	if (pc_get_min_freq(pc) > pc->rp0_freq)
		pc_set_min_freq(pc, pc->rp0_freq);

	return 0;
}
static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	lockdep_assert_held(&pc->freq_lock);

	if (pc->user_requested_min != 0) {
		ret = pc_set_min_freq(pc, pc->user_requested_min);
		if (ret)
			return ret;
	}

	if (pc->user_requested_max != 0) {
		ret = pc_set_max_freq(pc, pc->user_requested_max);
		if (ret)
			return ret;
	}

	return ret;
}
/**
 * xe_guc_pc_gucrc_disable - Disable GuC RC
 * @pc: Xe_GuC_PC instance
 *
 * Disables GuC RC by taking control of RC6 back from GuC.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	int ret = 0;

	if (xe->info.skip_guc_pc)
		return 0;

	xe_device_mem_access_get(pc_to_xe(pc));

	ret = pc_action_setup_gucrc(pc, XE_GUCRC_HOST_CONTROL);
	if (ret)
		goto out;

	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out;

	xe_gt_idle_disable_c6(gt);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));

out:
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}
static void pc_init_pcode_freq(struct xe_guc_pc *pc)
{
	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);

	XE_WARN_ON(xe_pcode_init_min_freq_table(pc_to_gt(pc), min, max));
}
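
/*
 * The pcode min freq table above takes frequencies in units of
 * GT_FREQUENCY_MULTIPLIER (50 MHz): an rp0_freq of 2000 MHz, for example,
 * is handed to xe_pcode_init_min_freq_table() as 2000 / 50 = 40.
 */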
static int pc_init_freqs(struct xe_guc_pc *pc)
{
	int ret;

	mutex_lock(&pc->freq_lock);

	ret = pc_adjust_freq_bounds(pc);
	if (ret)
		goto out;

	ret = pc_adjust_requested_freq(pc);
	if (ret)
		goto out;

	pc_update_rp_values(pc);

	pc_init_pcode_freq(pc);

	/*
	 * The frequencies are really ready for use only after the user
	 * requested ones got restored.
	 */
	pc->freq_ready = true;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}
/**
 * xe_guc_pc_start - Start GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
int xe_guc_pc_start(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int ret;

	xe_gt_assert(gt, xe_device_uc_enabled(xe));

	xe_device_mem_access_get(pc_to_xe(pc));

	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out_fail_force_wake;

	if (xe->info.skip_guc_pc) {
		if (xe->info.platform != XE_PVC)
			xe_gt_idle_enable_c6(gt);

		/* Request max possible since dynamic freq mgmt is not enabled */
		pc_set_cur_freq(pc, UINT_MAX);

		ret = 0;
		goto out;
	}

	memset(pc->bo->vmap.vaddr, 0, size);
	slpc_shared_data_write(pc, header.size, size);

	ret = pc_action_reset(pc);
	if (ret)
		goto out;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) {
		drm_err(&pc_to_xe(pc)->drm, "GuC PC Start failed\n");
		ret = -EIO;
		goto out;
	}

	ret = pc_init_freqs(pc);
	if (ret)
		goto out;

	if (xe->info.platform == XE_PVC) {
		xe_guc_pc_gucrc_disable(pc);
		ret = 0;
		goto out;
	}

	ret = pc_action_setup_gucrc(pc, XE_GUCRC_FIRMWARE_CONTROL);

out:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out_fail_force_wake:
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}
/**
 * xe_guc_pc_stop - Stop GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
int xe_guc_pc_stop(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	int ret;

	xe_device_mem_access_get(pc_to_xe(pc));

	if (xe->info.skip_guc_pc) {
		xe_gt_idle_disable_c6(pc_to_gt(pc));
		ret = 0;
		goto out;
	}

	mutex_lock(&pc->freq_lock);
	pc->freq_ready = false;
	mutex_unlock(&pc->freq_lock);

	ret = pc_action_shutdown(pc);
	if (ret)
		goto out;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_NOT_RUNNING)) {
		drm_err(&pc_to_xe(pc)->drm, "GuC PC Shutdown failed\n");
		ret = -EIO;
	}

out:
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}
/**
 * xe_guc_pc_fini - Finalize GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_fini(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);

	if (xe->info.skip_guc_pc) {
		xe_gt_idle_disable_c6(pc_to_gt(pc));
		return;
	}

	XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
	XE_WARN_ON(xe_guc_pc_stop(pc));
	mutex_destroy(&pc->freq_lock);
}
/**
 * xe_guc_pc_init - Initialize GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
int xe_guc_pc_init(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));

	if (xe->info.skip_guc_pc)
		return 0;

	mutex_init(&pc->freq_lock);

	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
					  XE_BO_CREATE_GGTT_BIT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	pc->bo = bo;
	return 0;
}