// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_hw_engine.h"

#include <drm/drm_managed.h>

#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_topology.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
#include "xe_rtp.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_tuning.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"

#define MAX_MMIO_BASES 3
struct engine_info {
	const char *name;
	unsigned int class : 8;
	unsigned int instance : 8;
	unsigned int irq_offset : 8;
	enum xe_force_wake_domains domain;
	u32 mmio_base;
};

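/*
 * Static descriptors for every possible hardware engine, indexed by
 * enum xe_hw_engine_id. Only entries whose bit is set in the GT's
 * engine_mask are instantiated at runtime.
 */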
static const struct engine_info engine_infos[] = {
	[XE_HW_ENGINE_RCS0] = {
		.name = "rcs0",
		.class = XE_ENGINE_CLASS_RENDER,
		.instance = 0,
		.irq_offset = ilog2(INTR_RCS0),
		.domain = XE_FW_RENDER,
		.mmio_base = RENDER_RING_BASE,
	},
	[XE_HW_ENGINE_BCS0] = {
		.name = "bcs0",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 0,
		.irq_offset = ilog2(INTR_BCS(0)),
		.domain = XE_FW_RENDER,
		.mmio_base = BLT_RING_BASE,
	},
	[XE_HW_ENGINE_BCS1] = {
		.name = "bcs1",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 1,
		.irq_offset = ilog2(INTR_BCS(1)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS1_RING_BASE,
	},
	[XE_HW_ENGINE_BCS2] = {
		.name = "bcs2",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 2,
		.irq_offset = ilog2(INTR_BCS(2)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS2_RING_BASE,
	},
	[XE_HW_ENGINE_BCS3] = {
		.name = "bcs3",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 3,
		.irq_offset = ilog2(INTR_BCS(3)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS3_RING_BASE,
	},
	[XE_HW_ENGINE_BCS4] = {
		.name = "bcs4",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 4,
		.irq_offset = ilog2(INTR_BCS(4)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS4_RING_BASE,
	},
	[XE_HW_ENGINE_BCS5] = {
		.name = "bcs5",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 5,
		.irq_offset = ilog2(INTR_BCS(5)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS5_RING_BASE,
	},
	[XE_HW_ENGINE_BCS6] = {
		.name = "bcs6",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 6,
		.irq_offset = ilog2(INTR_BCS(6)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS6_RING_BASE,
	},
	[XE_HW_ENGINE_BCS7] = {
		.name = "bcs7",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 7,
		.irq_offset = ilog2(INTR_BCS(7)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS7_RING_BASE,
	},
	[XE_HW_ENGINE_BCS8] = {
		.name = "bcs8",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 8,
		.irq_offset = ilog2(INTR_BCS8),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS8_RING_BASE,
	},
	[XE_HW_ENGINE_VCS0] = {
		.name = "vcs0",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 0,
		.irq_offset = 32 + ilog2(INTR_VCS(0)),
		.domain = XE_FW_MEDIA_VDBOX0,
		.mmio_base = BSD_RING_BASE,
	},
	[XE_HW_ENGINE_VCS1] = {
		.name = "vcs1",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 1,
		.irq_offset = 32 + ilog2(INTR_VCS(1)),
		.domain = XE_FW_MEDIA_VDBOX1,
		.mmio_base = BSD2_RING_BASE,
	},
	[XE_HW_ENGINE_VCS2] = {
		.name = "vcs2",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 2,
		.irq_offset = 32 + ilog2(INTR_VCS(2)),
		.domain = XE_FW_MEDIA_VDBOX2,
		.mmio_base = BSD3_RING_BASE,
	},
	[XE_HW_ENGINE_VCS3] = {
		.name = "vcs3",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 3,
		.irq_offset = 32 + ilog2(INTR_VCS(3)),
		.domain = XE_FW_MEDIA_VDBOX3,
		.mmio_base = BSD4_RING_BASE,
	},
	[XE_HW_ENGINE_VCS4] = {
		.name = "vcs4",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 4,
		.irq_offset = 32 + ilog2(INTR_VCS(4)),
		.domain = XE_FW_MEDIA_VDBOX4,
		.mmio_base = XEHP_BSD5_RING_BASE,
	},
	[XE_HW_ENGINE_VCS5] = {
		.name = "vcs5",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 5,
		.irq_offset = 32 + ilog2(INTR_VCS(5)),
		.domain = XE_FW_MEDIA_VDBOX5,
		.mmio_base = XEHP_BSD6_RING_BASE,
	},
	[XE_HW_ENGINE_VCS6] = {
		.name = "vcs6",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 6,
		.irq_offset = 32 + ilog2(INTR_VCS(6)),
		.domain = XE_FW_MEDIA_VDBOX6,
		.mmio_base = XEHP_BSD7_RING_BASE,
	},
	[XE_HW_ENGINE_VCS7] = {
		.name = "vcs7",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 7,
		.irq_offset = 32 + ilog2(INTR_VCS(7)),
		.domain = XE_FW_MEDIA_VDBOX7,
		.mmio_base = XEHP_BSD8_RING_BASE,
	},
	[XE_HW_ENGINE_VECS0] = {
		.name = "vecs0",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 0,
		.irq_offset = 32 + ilog2(INTR_VECS(0)),
		.domain = XE_FW_MEDIA_VEBOX0,
		.mmio_base = VEBOX_RING_BASE,
	},
	[XE_HW_ENGINE_VECS1] = {
		.name = "vecs1",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 1,
		.irq_offset = 32 + ilog2(INTR_VECS(1)),
		.domain = XE_FW_MEDIA_VEBOX1,
		.mmio_base = VEBOX2_RING_BASE,
	},
	[XE_HW_ENGINE_VECS2] = {
		.name = "vecs2",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 2,
		.irq_offset = 32 + ilog2(INTR_VECS(2)),
		.domain = XE_FW_MEDIA_VEBOX2,
		.mmio_base = XEHP_VEBOX3_RING_BASE,
	},
	[XE_HW_ENGINE_VECS3] = {
		.name = "vecs3",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 3,
		.irq_offset = 32 + ilog2(INTR_VECS(3)),
		.domain = XE_FW_MEDIA_VEBOX3,
		.mmio_base = XEHP_VEBOX4_RING_BASE,
	},
	[XE_HW_ENGINE_CCS0] = {
		.name = "ccs0",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 0,
		.irq_offset = ilog2(INTR_CCS(0)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE0_RING_BASE,
	},
	[XE_HW_ENGINE_CCS1] = {
		.name = "ccs1",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 1,
		.irq_offset = ilog2(INTR_CCS(1)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE1_RING_BASE,
	},
	[XE_HW_ENGINE_CCS2] = {
		.name = "ccs2",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 2,
		.irq_offset = ilog2(INTR_CCS(2)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE2_RING_BASE,
	},
	[XE_HW_ENGINE_CCS3] = {
		.name = "ccs3",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 3,
		.irq_offset = ilog2(INTR_CCS(3)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE3_RING_BASE,
	},
	[XE_HW_ENGINE_GSCCS0] = {
		.name = "gsccs0",
		.class = XE_ENGINE_CLASS_OTHER,
		.instance = OTHER_GSC_INSTANCE,
		.domain = XE_FW_GSC,
		.mmio_base = GSCCS_RING_BASE,
	},
};

static void hw_engine_fini(struct drm_device *drm, void *arg)
{
	struct xe_hw_engine *hwe = arg;

	if (hwe->exl_port)
		xe_execlist_port_destroy(hwe->exl_port);
	xe_lrc_finish(&hwe->kernel_lrc);

	hwe->gt = NULL;
}

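/*
 * Engine registers are addressed relative to the engine's mmio_base, so the
 * helpers below add the base before touching the register. Callers must hold
 * the engine's forcewake domain, which the asserts verify.
 */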
static void hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg,
				   u32 val)
{
	xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);

	reg.addr += hwe->mmio_base;

	xe_mmio_write32(hwe->gt, reg, val);
}

static u32 hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
{
	xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);

	reg.addr += hwe->mmio_base;

	return xe_mmio_read32(hwe->gt, reg);
}

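/**
 * xe_hw_engine_enable_ring - Enable submission on a hardware engine
 * @hwe: engine to enable
 *
 * Unmasks HWSTAM events, points RING_HWS_PGA at the engine's hardware status
 * page, disables the legacy ring-buffer mode and clears STOP_RING. For
 * compute engines the CCS units are enabled first via RCU_MODE.
 */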
void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
{
	u32 ccs_mask =
		xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);

	if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
		xe_mmio_write32(hwe->gt, RCU_MODE,
				_MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));

	hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
	hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
			       xe_bo_ggtt_addr(hwe->hwsp));
	hw_engine_mmio_write32(hwe, RING_MODE(0),
			       _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
	hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
			       _MASKED_BIT_DISABLE(STOP_RING));
	hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
}

static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_gt *gt,
						 const struct xe_hw_engine *hwe)
{
	return xe_gt_ccs_mode_enabled(gt) &&
	       xe_rtp_match_first_render_or_compute(gt, hwe);
}

static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_gt *gt,
				      const struct xe_hw_engine *hwe)
{
	if (GRAPHICS_VER(gt_to_xe(gt)) < 20)
		return false;

	if (hwe->class != XE_ENGINE_CLASS_COMPUTE &&
	    hwe->class != XE_ENGINE_CLASS_RENDER)
		return false;

	return xe_mmio_read32(hwe->gt, XEHP_FUSE4) & CFEG_WMTP_DISABLE;
}

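/**
 * xe_hw_engine_setup_default_lrc_state - Build default LRC register state
 * @hwe: engine to process
 *
 * Collects the register values that every new logical ring context for this
 * engine should start with (BLIT_CCTL MOCS defaults, fixed-slice CCS mode,
 * WMTP preemption fallback) into @hwe->reg_lrc using the RTP infrastructure.
 */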
void
xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	const u8 mocs_write_idx = gt->mocs.uc_index;
	const u8 mocs_read_idx = gt->mocs.uc_index;
	u32 blit_cctl_val = REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, mocs_write_idx) |
			    REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, mocs_read_idx);
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
	const struct xe_rtp_entry_sr lrc_was[] = {
		/*
		 * Some blitter commands do not have a field for MOCS, those
		 * commands will use MOCS index pointed by BLIT_CCTL.
		 * BLIT_CCTL registers need to be programmed to un-cached.
		 */
		{ XE_RTP_NAME("BLIT_CCTL_default_MOCS"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED),
			       ENGINE_CLASS(COPY)),
		  XE_RTP_ACTIONS(FIELD_SET(BLIT_CCTL(0),
					   BLIT_CCTL_DST_MOCS_MASK |
					   BLIT_CCTL_SRC_MOCS_MASK,
					   blit_cctl_val,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/* Use Fixed slice CCS mode */
		{ XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
		  XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
		  XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
					   RCU_MODE_FIXED_SLICE_CCS_MODE))
		},
		/* Disable WMTP if HW doesn't support it */
		{ XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"),
		  XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)),
		  XE_RTP_ACTIONS(FIELD_SET(CS_CHICKEN1(0),
					   PREEMPT_GPGPU_LEVEL_MASK,
					   PREEMPT_GPGPU_THREAD_GROUP_LEVEL)),
		  XE_RTP_ENTRY_FLAG(FOREACH_ENGINE)
		},
		{}
	};

	xe_rtp_process_to_sr(&ctx, lrc_was, &hwe->reg_lrc);
}

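/*
 * Per-engine register defaults that are neither workarounds nor tunings:
 * RING_CMD_CCTL MOCS overrides and, on MTL, GSCCS idle messaging. These are
 * collected into hwe->reg_sr and applied together with the save/restore list.
 */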
static void
hw_engine_setup_default_state(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	struct xe_device *xe = gt_to_xe(gt);
	/*
	 * RING_CMD_CCTL specifies the default MOCS entry that will be
	 * used by the command streamer when executing commands that
	 * don't have a way to explicitly specify a MOCS setting.
	 * The default should usually reference whichever MOCS entry
	 * corresponds to uncached behavior, although use of a WB cached
	 * entry is recommended by the spec in certain circumstances on
	 * specific platforms.
	 */
	const u8 mocs_write_idx = gt->mocs.uc_index;
	const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE &&
				 (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) ?
				 gt->mocs.wb_index : gt->mocs.uc_index;
	u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
				REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, mocs_read_idx);
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
	const struct xe_rtp_entry_sr engine_entries[] = {
		{ XE_RTP_NAME("RING_CMD_CCTL_default_MOCS"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED)),
		  XE_RTP_ACTIONS(FIELD_SET(RING_CMD_CCTL(0),
					   CMD_CCTL_WRITE_OVERRIDE_MASK |
					   CMD_CCTL_READ_OVERRIDE_MASK,
					   ring_cmd_cctl_val,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/*
		 * To allow the GSC engine to go idle on MTL we need to enable
		 * idle messaging and set the hysteresis value (we use 0xA=5us
		 * as recommended in spec). On platforms after MTL this is
		 * enabled by default.
		 */
		{ XE_RTP_NAME("MTL GSCCS IDLE MSG enable"),
		  XE_RTP_RULES(MEDIA_VERSION(1300), ENGINE_CLASS(OTHER)),
		  XE_RTP_ACTIONS(CLR(RING_PSMI_CTL(0),
				     IDLE_MSG_DISABLE,
				     XE_RTP_ACTION_FLAG(ENGINE_BASE)),
				 FIELD_SET(RING_PWRCTX_MAXCNT(0),
					   IDLE_WAIT_TIME,
					   0xA,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		{}
	};

	xe_rtp_process_to_sr(&ctx, engine_entries, &hwe->reg_sr);
}

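/*
 * First-stage engine init: copy the static engine_info into the xe_hw_engine,
 * seed the per-class scheduling defaults the first time a class is seen, and
 * build the workaround, tuning, default-state and whitelist register lists.
 */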
static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
				 enum xe_hw_engine_id id)
{
	const struct engine_info *info;

	if (WARN_ON(id >= ARRAY_SIZE(engine_infos) || !engine_infos[id].name))
		return;

	if (!(gt->info.engine_mask & BIT(id)))
		return;

	info = &engine_infos[id];

	xe_gt_assert(gt, !hwe->gt);

	hwe->gt = gt;
	hwe->class = info->class;
	hwe->instance = info->instance;
	hwe->mmio_base = info->mmio_base;
	hwe->irq_offset = info->irq_offset;
	hwe->domain = info->domain;
	hwe->name = info->name;
	hwe->fence_irq = &gt->fence_irq[info->class];
	hwe->engine_id = id;

	hwe->eclass = &gt->eclass[hwe->class];
	if (!hwe->eclass->sched_props.job_timeout_ms) {
		hwe->eclass->sched_props.job_timeout_ms = 5 * 1000;
		hwe->eclass->sched_props.job_timeout_min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
		hwe->eclass->sched_props.job_timeout_max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
		hwe->eclass->sched_props.timeslice_us = 1 * 1000;
		hwe->eclass->sched_props.timeslice_min = XE_HW_ENGINE_TIMESLICE_MIN;
		hwe->eclass->sched_props.timeslice_max = XE_HW_ENGINE_TIMESLICE_MAX;
		hwe->eclass->sched_props.preempt_timeout_us = XE_HW_ENGINE_PREEMPT_TIMEOUT;
		hwe->eclass->sched_props.preempt_timeout_min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
		hwe->eclass->sched_props.preempt_timeout_max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
		/* Record default props */
		hwe->eclass->defaults = hwe->eclass->sched_props;
	}

	xe_reg_sr_init(&hwe->reg_sr, hwe->name, gt_to_xe(gt));
	xe_tuning_process_engine(hwe);
	xe_wa_process_engine(hwe);
	hw_engine_setup_default_state(hwe);

	xe_reg_sr_init(&hwe->reg_whitelist, hwe->name, gt_to_xe(gt));
	xe_reg_whitelist_process_engine(hwe);
}

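/*
 * Second-stage engine init: apply the save/restore and whitelist registers to
 * the hardware, allocate the hardware status page and kernel LRC, and create
 * an execlist port when GuC submission is not in use.
 */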
static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
			  enum xe_hw_engine_id id)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	int err;

	xe_gt_assert(gt, id < ARRAY_SIZE(engine_infos) && engine_infos[id].name);
	xe_gt_assert(gt, gt->info.engine_mask & BIT(id));

	xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
	xe_reg_sr_apply_whitelist(hwe);

	hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
						 XE_BO_CREATE_VRAM_IF_DGFX(tile) |
						 XE_BO_CREATE_GGTT_BIT);
	if (IS_ERR(hwe->hwsp)) {
		err = PTR_ERR(hwe->hwsp);
		goto err_name;
	}

	err = xe_lrc_init(&hwe->kernel_lrc, hwe, NULL, NULL, SZ_16K);
	if (err)
		goto err_hwsp;

	if (!xe_device_uc_enabled(xe)) {
		hwe->exl_port = xe_execlist_port_create(xe, hwe);
		if (IS_ERR(hwe->exl_port)) {
			err = PTR_ERR(hwe->exl_port);
			goto err_kernel_lrc;
		}
	}

	if (xe_device_uc_enabled(xe))
		xe_hw_engine_enable_ring(hwe);

	/* We reserve the highest BCS instance for USM */
	if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY)
		gt->usm.reserved_bcs_instance = hwe->instance;

	err = drmm_add_action_or_reset(&xe->drm, hw_engine_fini, hwe);
	if (err)
		return err;

	return 0;

err_kernel_lrc:
	xe_lrc_finish(&hwe->kernel_lrc);
err_hwsp:
	xe_bo_unpin_map_no_vm(hwe->hwsp);
err_name:
	hwe->name = NULL;

	return err;
}

static void hw_engine_setup_logical_mapping(struct xe_gt *gt)
{
	int class;

	/* FIXME: Doing a simple logical mapping that works for most hardware */
	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		struct xe_hw_engine *hwe;
		enum xe_hw_engine_id id;
		int logical_instance = 0;

		for_each_hw_engine(hwe, gt, id)
			if (hwe->class == class)
				hwe->logical_instance = logical_instance++;
	}
}

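/*
 * Prune fused-off video decode/enhance engines from the GT engine mask based
 * on the VDBOX/VEBOX disable fuse register.
 */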
static void read_media_fuses(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 media_fuse;
	u16 vdbox_mask;
	u16 vebox_mask;
	int i, j;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	media_fuse = xe_mmio_read32(gt, GT_VEBOX_VDBOX_DISABLE);

	/*
	 * Pre-Xe_HP platforms had register bits representing absent engines,
	 * whereas Xe_HP and beyond have bits representing present engines.
	 * Invert the polarity on old platforms so that we can use common
	 * handling below.
	 */
	if (GRAPHICS_VERx100(xe) < 1250)
		media_fuse = ~media_fuse;

	vdbox_mask = REG_FIELD_GET(GT_VDBOX_DISABLE_MASK, media_fuse);
	vebox_mask = REG_FIELD_GET(GT_VEBOX_DISABLE_MASK, media_fuse);

	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j) & vdbox_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "vcs%u fused off\n", j);
		}
	}

	for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j) & vebox_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "vecs%u fused off\n", j);
		}
	}
}

static void read_copy_fuses(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 bcs_mask;

	if (GRAPHICS_VERx100(xe) < 1260 || GRAPHICS_VERx100(xe) >= 1270)
		return;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	bcs_mask = xe_mmio_read32(gt, MIRROR_FUSE3);
	bcs_mask = REG_FIELD_GET(MEML3_EN_MASK, bcs_mask);

	/* BCS0 is always present; only BCS1-BCS8 may be fused off */
	for (int i = XE_HW_ENGINE_BCS1, j = 0; i <= XE_HW_ENGINE_BCS8; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j / 2) & bcs_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "bcs%u fused off\n", j);
		}
	}
}

static void read_compute_fuses_from_dss(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	/*
	 * CCS fusing based on DSS masks only applies to platforms that can
	 * have more than one CCS.
	 */
	if (hweight64(gt->info.engine_mask &
		      GENMASK_ULL(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)) <= 1)
		return;

	/*
	 * CCS availability on Xe_HP is inferred from the presence of DSS in
	 * each quadrant.
	 */
	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!xe_gt_topology_has_dss_in_quadrant(gt, j)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "ccs%u fused off\n", j);
		}
	}
}

static void read_compute_fuses_from_reg(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 ccs_mask;

	ccs_mask = xe_mmio_read32(gt, XEHP_FUSE4);
	ccs_mask = REG_FIELD_GET(CCS_EN_MASK, ccs_mask);

	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if ((ccs_mask & BIT(j)) == 0) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "ccs%u fused off\n", j);
		}
	}
}

static void read_compute_fuses(struct xe_gt *gt)
{
	if (GRAPHICS_VER(gt_to_xe(gt)) >= 20)
		read_compute_fuses_from_reg(gt);
	else
		read_compute_fuses_from_dss(gt);
}

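/*
 * Drop the GSCCS from the engine mask when the GSC firmware it exists to
 * drive is not available.
 */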
static void check_gsc_availability(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
		return;

	/*
	 * The GSCCS is only used to communicate with the GSC FW, so if we don't
	 * have the FW there is nothing we need the engine for and can therefore
	 * skip its initialization.
	 */
	if (!xe_uc_fw_is_available(&gt->uc.gsc.fw)) {
		gt->info.engine_mask &= ~BIT(XE_HW_ENGINE_GSCCS0);
		drm_info(&xe->drm, "gsccs disabled due to lack of FW\n");
	}
}

int xe_hw_engines_init_early(struct xe_gt *gt)
{
	int i;

	read_media_fuses(gt);
	read_copy_fuses(gt);
	read_compute_fuses(gt);
	check_gsc_availability(gt);

	BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT < XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN);
	BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT > XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX);

	for (i = 0; i < ARRAY_SIZE(gt->hw_engines); i++)
		hw_engine_init_early(gt, &gt->hw_engines[i], i);

	return 0;
}

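/**
 * xe_hw_engines_init - Finish initialization of the engines on a GT
 * @gt: GT structure
 *
 * Runs the second-stage initialization of every enabled engine and then
 * assigns the per-class logical instance numbers.
 *
 * Return: 0 on success, negative error code on failure.
 */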
int xe_hw_engines_init(struct xe_gt *gt)
{
	int err;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		err = hw_engine_init(gt, hwe, id);
		if (err)
			return err;
	}

	hw_engine_setup_logical_mapping(gt);

	return 0;
}

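/**
 * xe_hw_engine_handle_irq - Handle an interrupt for a hardware engine
 * @hwe: engine that raised the interrupt
 * @intr_vec: interrupt identity vector
 *
 * Wakes user-fence waiters, forwards the vector to the engine's registered
 * irq_handler (if any) and runs the fence IRQ handler on user interrupts.
 */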
void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
{
	wake_up_all(&gt_to_xe(hwe->gt)->ufence_wq);

	if (hwe->irq_handler)
		hwe->irq_handler(hwe, intr_vec);

	if (intr_vec & GT_RENDER_USER_INTERRUPT)
		xe_hw_fence_irq_run(hwe->fence_irq);
}

/**
 * xe_hw_engine_snapshot_capture - Take a quick snapshot of the HW Engine.
 * @hwe: Xe HW Engine.
 *
 * This can be printed out in a later stage like during dev_coredump
 * analysis.
 *
 * Returns: a Xe HW Engine snapshot object that must be freed by the
 * caller, using `xe_hw_engine_snapshot_free`.
 */
struct xe_hw_engine_snapshot *
xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
{
	struct xe_hw_engine_snapshot *snapshot;
	u64 val;

	if (!xe_hw_engine_is_valid(hwe))
		return NULL;

	snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
	if (!snapshot)
		return NULL;

	snapshot->name = kstrdup(hwe->name, GFP_ATOMIC);
	snapshot->class = hwe->class;
	snapshot->logical_instance = hwe->logical_instance;
	snapshot->forcewake.domain = hwe->domain;
	snapshot->forcewake.ref = xe_force_wake_ref(gt_to_fw(hwe->gt),
						    hwe->domain);
	snapshot->mmio_base = hwe->mmio_base;

	/* no more VF accessible data below this point */
	if (IS_SRIOV_VF(gt_to_xe(hwe->gt)))
		return snapshot;

	snapshot->reg.ring_execlist_status =
		hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0));
	val = hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0));
	snapshot->reg.ring_execlist_status |= val << 32;

	snapshot->reg.ring_execlist_sq_contents =
		hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_LO(0));
	val = hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_HI(0));
	snapshot->reg.ring_execlist_sq_contents |= val << 32;

	snapshot->reg.ring_acthd = hw_engine_mmio_read32(hwe, RING_ACTHD(0));
	val = hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0));
	snapshot->reg.ring_acthd |= val << 32;

	snapshot->reg.ring_bbaddr = hw_engine_mmio_read32(hwe, RING_BBADDR(0));
	val = hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0));
	snapshot->reg.ring_bbaddr |= val << 32;

	snapshot->reg.ring_dma_fadd =
		hw_engine_mmio_read32(hwe, RING_DMA_FADD(0));
	val = hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0));
	snapshot->reg.ring_dma_fadd |= val << 32;

	snapshot->reg.ring_hwstam = hw_engine_mmio_read32(hwe, RING_HWSTAM(0));
	snapshot->reg.ring_hws_pga = hw_engine_mmio_read32(hwe, RING_HWS_PGA(0));
	snapshot->reg.ring_start = hw_engine_mmio_read32(hwe, RING_START(0));
	snapshot->reg.ring_head =
		hw_engine_mmio_read32(hwe, RING_HEAD(0)) & HEAD_ADDR;
	snapshot->reg.ring_tail =
		hw_engine_mmio_read32(hwe, RING_TAIL(0)) & TAIL_ADDR;
	snapshot->reg.ring_ctl = hw_engine_mmio_read32(hwe, RING_CTL(0));
	snapshot->reg.ring_mi_mode =
		hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
	snapshot->reg.ring_mode = hw_engine_mmio_read32(hwe, RING_MODE(0));
	snapshot->reg.ring_imr = hw_engine_mmio_read32(hwe, RING_IMR(0));
	snapshot->reg.ring_esr = hw_engine_mmio_read32(hwe, RING_ESR(0));
	snapshot->reg.ring_emr = hw_engine_mmio_read32(hwe, RING_EMR(0));
	snapshot->reg.ring_eir = hw_engine_mmio_read32(hwe, RING_EIR(0));
	snapshot->reg.ipehr = hw_engine_mmio_read32(hwe, RING_IPEHR(0));

	if (snapshot->class == XE_ENGINE_CLASS_COMPUTE)
		snapshot->reg.rcu_mode = xe_mmio_read32(hwe->gt, RCU_MODE);

	return snapshot;
}

/**
 * xe_hw_engine_snapshot_print - Print out a given Xe HW Engine snapshot.
 * @snapshot: Xe HW Engine snapshot object.
 * @p: drm_printer where it will be printed out.
 *
 * This function prints out a given Xe HW Engine snapshot object.
 */
void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot,
				 struct drm_printer *p)
{
	if (!snapshot)
		return;

	drm_printf(p, "%s (physical), logical instance=%d\n",
		   snapshot->name ? snapshot->name : "",
		   snapshot->logical_instance);
	drm_printf(p, "\tForcewake: domain 0x%x, ref %d\n",
		   snapshot->forcewake.domain, snapshot->forcewake.ref);
	drm_printf(p, "\tHWSTAM: 0x%08x\n", snapshot->reg.ring_hwstam);
	drm_printf(p, "\tRING_HWS_PGA: 0x%08x\n", snapshot->reg.ring_hws_pga);
	drm_printf(p, "\tRING_EXECLIST_STATUS: 0x%016llx\n",
		   snapshot->reg.ring_execlist_status);
	drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS: 0x%016llx\n",
		   snapshot->reg.ring_execlist_sq_contents);
	drm_printf(p, "\tRING_START: 0x%08x\n", snapshot->reg.ring_start);
	drm_printf(p, "\tRING_HEAD: 0x%08x\n", snapshot->reg.ring_head);
	drm_printf(p, "\tRING_TAIL: 0x%08x\n", snapshot->reg.ring_tail);
	drm_printf(p, "\tRING_CTL: 0x%08x\n", snapshot->reg.ring_ctl);
	drm_printf(p, "\tRING_MI_MODE: 0x%08x\n", snapshot->reg.ring_mi_mode);
	drm_printf(p, "\tRING_MODE: 0x%08x\n",
		   snapshot->reg.ring_mode);
	drm_printf(p, "\tRING_IMR: 0x%08x\n", snapshot->reg.ring_imr);
	drm_printf(p, "\tRING_ESR: 0x%08x\n", snapshot->reg.ring_esr);
	drm_printf(p, "\tRING_EMR: 0x%08x\n", snapshot->reg.ring_emr);
	drm_printf(p, "\tRING_EIR: 0x%08x\n", snapshot->reg.ring_eir);
	drm_printf(p, "\tACTHD: 0x%016llx\n", snapshot->reg.ring_acthd);
	drm_printf(p, "\tBBADDR: 0x%016llx\n", snapshot->reg.ring_bbaddr);
	drm_printf(p, "\tDMA_FADDR: 0x%016llx\n", snapshot->reg.ring_dma_fadd);
	drm_printf(p, "\tIPEHR: 0x%08x\n", snapshot->reg.ipehr);
	if (snapshot->class == XE_ENGINE_CLASS_COMPUTE)
		drm_printf(p, "\tRCU_MODE: 0x%08x\n",
			   snapshot->reg.rcu_mode);
}

/**
 * xe_hw_engine_snapshot_free - Free all allocated objects for a given snapshot.
 * @snapshot: Xe HW Engine snapshot object.
 *
 * This function frees all the memory that was allocated at capture time.
 */
void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot)
{
	if (!snapshot)
		return;

	kfree(snapshot->name);
	kfree(snapshot);
}

/**
 * xe_hw_engine_print - Xe HW Engine Print.
 * @hwe: Hardware Engine.
 * @p: drm_printer.
 *
 * This function quickly captures a snapshot and immediately prints it out.
 */
void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p)
{
	struct xe_hw_engine_snapshot *snapshot;

	snapshot = xe_hw_engine_snapshot_capture(hwe);
	xe_hw_engine_snapshot_print(snapshot, p);
	xe_hw_engine_snapshot_free(snapshot);
}

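/**
 * xe_hw_engine_mask_per_class - Build an instance mask for an engine class
 * @gt: GT structure
 * @engine_class: engine class to look up
 *
 * Return: bitmask of the hardware engine instances of @engine_class that are
 * present in @gt's engine mask.
 */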
u32 xe_hw_engine_mask_per_class(struct xe_gt *gt,
				enum xe_engine_class engine_class)
{
	u32 mask = 0;
	enum xe_hw_engine_id id;

	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
		if (engine_infos[id].class == engine_class &&
		    gt->info.engine_mask & BIT(id))
			mask |= BIT(engine_infos[id].instance);
	}
	return mask;
}

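/**
 * xe_hw_engine_is_reserved - Check if a hardware engine is reserved
 * @hwe: hardware engine to check
 *
 * Return: true if the engine is held back for internal use: either a compute
 * engine disabled by the current ccs_mode configuration or the copy engine
 * instance reserved for USM.
 */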
bool xe_hw_engine_is_reserved(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	struct xe_device *xe = gt_to_xe(gt);

	if (hwe->class == XE_ENGINE_CLASS_OTHER)
		return false;

	/* Check for engines disabled by ccs_mode setting */
	if (xe_gt_ccs_mode_enabled(gt) &&
	    hwe->class == XE_ENGINE_CLASS_COMPUTE &&
	    hwe->logical_instance >= gt->ccs_mode)
		return true;

	return xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
		hwe->instance == gt->usm.reserved_bcs_instance;
}