// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "xe_force_wake.h"
#include "xe_gt_idle.h"
#include "xe_gt_sysfs.h"
#include "xe_guc_pc.h"
#include "regs/xe_gt_regs.h"
#include "xe_macros.h"

/**
 * DOC: Xe GT Idle
 *
 * Contains functions that init GT idle features like C6
 *
 * device/gt#/gtidle/name - name of the state
 * device/gt#/gtidle/idle_residency_ms - Provides residency of the idle state in ms
 * device/gt#/gtidle/idle_status - Provides current idle state
 */
30 static struct xe_gt_idle *dev_to_gtidle(struct device *dev)
32 struct kobject *kobj = &dev->kobj;
34 return &kobj_to_gt(kobj->parent)->gtidle;
37 static struct xe_gt *gtidle_to_gt(struct xe_gt_idle *gtidle)
39 return container_of(gtidle, struct xe_gt, gtidle);
42 static struct xe_guc_pc *gtidle_to_pc(struct xe_gt_idle *gtidle)
44 return >idle_to_gt(gtidle)->uc.guc.pc;
47 static struct xe_device *
48 pc_to_xe(struct xe_guc_pc *pc)
50 struct xe_guc *guc = container_of(pc, struct xe_guc, pc);
51 struct xe_gt *gt = container_of(guc, struct xe_gt, uc.guc);
56 static const char *gt_idle_state_to_string(enum xe_gt_idle_state state)
68 static u64 get_residency_ms(struct xe_gt_idle *gtidle, u64 cur_residency)
70 u64 delta, overflow_residency, prev_residency;
72 overflow_residency = BIT_ULL(32);
75 * Counter wrap handling
76 * Store previous hw counter values for counter wrap-around handling
77 * Relying on sufficient frequency of queries otherwise counters can still wrap.
79 prev_residency = gtidle->prev_residency;
80 gtidle->prev_residency = cur_residency;
83 if (cur_residency >= prev_residency)
84 delta = cur_residency - prev_residency;
86 delta = cur_residency + (overflow_residency - prev_residency);
88 /* Add delta to extended raw driver copy of idle residency */
89 cur_residency = gtidle->cur_residency + delta;
90 gtidle->cur_residency = cur_residency;
92 /* residency multiplier in ns, convert to ms */
93 cur_residency = mul_u64_u32_div(cur_residency, gtidle->residency_multiplier, 1e6);
98 void xe_gt_idle_enable_pg(struct xe_gt *gt)
100 struct xe_device *xe = gt_to_xe(gt);
101 struct xe_gt_idle *gtidle = >->gtidle;
102 struct xe_mmio *mmio = >->mmio;
103 u32 vcs_mask, vecs_mask;
110 /* Disable CPG for PVC */
111 if (xe->info.platform == XE_PVC)
114 xe_device_assert_mem_access(gt_to_xe(gt));
116 vcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_DECODE);
117 vecs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE);
119 if (vcs_mask || vecs_mask)
120 gtidle->powergate_enable = MEDIA_POWERGATE_ENABLE;
122 if (!xe_gt_is_media_type(gt))
123 gtidle->powergate_enable |= RENDER_POWERGATE_ENABLE;
125 for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
126 if ((gt->info.engine_mask & BIT(i)))
127 gtidle->powergate_enable |= (VDN_HCP_POWERGATE_ENABLE(j) |
128 VDN_MFXVDENC_POWERGATE_ENABLE(j));
131 fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
132 if (xe->info.skip_guc_pc) {
134 * GuC sets the hysteresis value when GuC PC is enabled
135 * else set it to 25 (25 * 1.28us)
137 xe_mmio_write32(mmio, MEDIA_POWERGATE_IDLE_HYSTERESIS, 25);
138 xe_mmio_write32(mmio, RENDER_POWERGATE_IDLE_HYSTERESIS, 25);
141 xe_mmio_write32(mmio, POWERGATE_ENABLE, gtidle->powergate_enable);
142 xe_force_wake_put(gt_to_fw(gt), fw_ref);
145 void xe_gt_idle_disable_pg(struct xe_gt *gt)
147 struct xe_gt_idle *gtidle = >->gtidle;
150 if (IS_SRIOV_VF(gt_to_xe(gt)))
153 xe_device_assert_mem_access(gt_to_xe(gt));
154 gtidle->powergate_enable = 0;
156 fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
157 xe_mmio_write32(>->mmio, POWERGATE_ENABLE, gtidle->powergate_enable);
158 xe_force_wake_put(gt_to_fw(gt), fw_ref);
162 * xe_gt_idle_pg_print - Xe powergating info
166 * This function prints the powergating information
168 * Return: 0 on success, negative error code otherwise
170 int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p)
172 struct xe_gt_idle *gtidle = >->gtidle;
173 struct xe_device *xe = gt_to_xe(gt);
174 enum xe_gt_idle_state state;
175 u32 pg_enabled, pg_status = 0;
176 u32 vcs_mask, vecs_mask;
182 * Slice 0: VCS0, VCS1, VECS0
183 * Slice 1: VCS2, VCS3, VECS1
184 * Slice 2: VCS4, VCS5, VECS2
185 * Slice 3: VCS6, VCS7, VECS3
187 static const struct {
191 {(BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS1) |
192 BIT(XE_HW_ENGINE_VECS0)), MEDIA_SLICE0_AWAKE_STATUS},
194 {(BIT(XE_HW_ENGINE_VCS2) | BIT(XE_HW_ENGINE_VCS3) |
195 BIT(XE_HW_ENGINE_VECS1)), MEDIA_SLICE1_AWAKE_STATUS},
197 {(BIT(XE_HW_ENGINE_VCS4) | BIT(XE_HW_ENGINE_VCS5) |
198 BIT(XE_HW_ENGINE_VECS2)), MEDIA_SLICE2_AWAKE_STATUS},
200 {(BIT(XE_HW_ENGINE_VCS6) | BIT(XE_HW_ENGINE_VCS7) |
201 BIT(XE_HW_ENGINE_VECS3)), MEDIA_SLICE3_AWAKE_STATUS},
204 if (xe->info.platform == XE_PVC) {
205 drm_printf(p, "Power Gating not supported\n");
209 state = gtidle->idle_status(gtidle_to_pc(gtidle));
210 pg_enabled = gtidle->powergate_enable;
212 /* Do not wake the GT to read powergating status */
213 if (state != GT_IDLE_C6) {
214 fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
218 pg_enabled = xe_mmio_read32(>->mmio, POWERGATE_ENABLE);
219 pg_status = xe_mmio_read32(>->mmio, POWERGATE_DOMAIN_STATUS);
221 xe_force_wake_put(gt_to_fw(gt), fw_ref);
224 if (gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK) {
225 drm_printf(p, "Render Power Gating Enabled: %s\n",
226 str_yes_no(pg_enabled & RENDER_POWERGATE_ENABLE));
228 drm_printf(p, "Render Power Gate Status: %s\n",
229 str_up_down(pg_status & RENDER_AWAKE_STATUS));
232 vcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_DECODE);
233 vecs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE);
235 /* Print media CPG status only if media is present */
236 if (vcs_mask || vecs_mask) {
237 drm_printf(p, "Media Power Gating Enabled: %s\n",
238 str_yes_no(pg_enabled & MEDIA_POWERGATE_ENABLE));
240 for (n = 0; n < ARRAY_SIZE(media_slices); n++)
241 if (gt->info.engine_mask & media_slices[n].engines)
242 drm_printf(p, "Media Slice%d Power Gate Status: %s\n", n,
243 str_up_down(pg_status & media_slices[n].status_bit));
248 static ssize_t name_show(struct device *dev,
249 struct device_attribute *attr, char *buff)
251 struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
252 struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
255 xe_pm_runtime_get(pc_to_xe(pc));
256 ret = sysfs_emit(buff, "%s\n", gtidle->name);
257 xe_pm_runtime_put(pc_to_xe(pc));
261 static DEVICE_ATTR_RO(name);
263 static ssize_t idle_status_show(struct device *dev,
264 struct device_attribute *attr, char *buff)
266 struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
267 struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
268 enum xe_gt_idle_state state;
270 xe_pm_runtime_get(pc_to_xe(pc));
271 state = gtidle->idle_status(pc);
272 xe_pm_runtime_put(pc_to_xe(pc));
274 return sysfs_emit(buff, "%s\n", gt_idle_state_to_string(state));
276 static DEVICE_ATTR_RO(idle_status);
278 static ssize_t idle_residency_ms_show(struct device *dev,
279 struct device_attribute *attr, char *buff)
281 struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
282 struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
285 xe_pm_runtime_get(pc_to_xe(pc));
286 residency = gtidle->idle_residency(pc);
287 xe_pm_runtime_put(pc_to_xe(pc));
289 return sysfs_emit(buff, "%llu\n", get_residency_ms(gtidle, residency));
291 static DEVICE_ATTR_RO(idle_residency_ms);
293 static const struct attribute *gt_idle_attrs[] = {
295 &dev_attr_idle_status.attr,
296 &dev_attr_idle_residency_ms.attr,
300 static void gt_idle_fini(void *arg)
302 struct kobject *kobj = arg;
303 struct xe_gt *gt = kobj_to_gt(kobj->parent);
306 xe_gt_idle_disable_pg(gt);
308 if (gt_to_xe(gt)->info.skip_guc_pc) {
309 fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
310 xe_gt_idle_disable_c6(gt);
311 xe_force_wake_put(gt_to_fw(gt), fw_ref);
314 sysfs_remove_files(kobj, gt_idle_attrs);
318 int xe_gt_idle_init(struct xe_gt_idle *gtidle)
320 struct xe_gt *gt = gtidle_to_gt(gtidle);
321 struct xe_device *xe = gt_to_xe(gt);
322 struct kobject *kobj;
328 kobj = kobject_create_and_add("gtidle", gt->sysfs);
332 if (xe_gt_is_media_type(gt)) {
333 snprintf(gtidle->name, sizeof(gtidle->name), "gt%d-mc", gt->info.id);
334 gtidle->idle_residency = xe_guc_pc_mc6_residency;
336 snprintf(gtidle->name, sizeof(gtidle->name), "gt%d-rc", gt->info.id);
337 gtidle->idle_residency = xe_guc_pc_rc6_residency;
340 /* Multiplier for Residency counter in units of 1.28us */
341 gtidle->residency_multiplier = 1280;
342 gtidle->idle_status = xe_guc_pc_c_status;
344 err = sysfs_create_files(kobj, gt_idle_attrs);
350 xe_gt_idle_enable_pg(gt);
352 return devm_add_action_or_reset(xe->drm.dev, gt_idle_fini, kobj);
355 void xe_gt_idle_enable_c6(struct xe_gt *gt)
357 xe_device_assert_mem_access(gt_to_xe(gt));
358 xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
360 if (IS_SRIOV_VF(gt_to_xe(gt)))
363 /* Units of 1280 ns for a total of 5s */
364 xe_mmio_write32(>->mmio, RC_IDLE_HYSTERSIS, 0x3B9ACA);
366 xe_mmio_write32(>->mmio, RC_CONTROL,
367 RC_CTL_HW_ENABLE | RC_CTL_TO_MODE | RC_CTL_RC6_ENABLE);
370 void xe_gt_idle_disable_c6(struct xe_gt *gt)
372 xe_device_assert_mem_access(gt_to_xe(gt));
373 xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
375 if (IS_SRIOV_VF(gt_to_xe(gt)))
378 xe_mmio_write32(>->mmio, RC_CONTROL, 0);
379 xe_mmio_write32(>->mmio, RC_STATE, 0);