// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_hw_engine.h"

#include <drm/drm_managed.h>

#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_topology.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
#include "xe_rtp.h"
#include "xe_sched_job.h"
#include "xe_tuning.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"

#define MAX_MMIO_BASES 3
struct engine_info {
	const char *name;
	unsigned int class : 8;
	unsigned int instance : 8;
	enum xe_force_wake_domains domain;
	u32 mmio_base;
};
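
/* Static metadata for each hardware engine, indexed by enum xe_hw_engine_id. */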
static const struct engine_info engine_infos[] = {
	[XE_HW_ENGINE_RCS0] = {
		.name = "rcs0",
		.class = XE_ENGINE_CLASS_RENDER,
		.instance = 0,
		.domain = XE_FW_RENDER,
		.mmio_base = RENDER_RING_BASE,
	},
	[XE_HW_ENGINE_BCS0] = {
		.name = "bcs0",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 0,
		.domain = XE_FW_RENDER,
		.mmio_base = BLT_RING_BASE,
	},
	[XE_HW_ENGINE_BCS1] = {
		.name = "bcs1",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 1,
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS1_RING_BASE,
	},
	[XE_HW_ENGINE_BCS2] = {
		.name = "bcs2",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 2,
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS2_RING_BASE,
	},
	[XE_HW_ENGINE_BCS3] = {
		.name = "bcs3",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 3,
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS3_RING_BASE,
	},
	[XE_HW_ENGINE_BCS4] = {
		.name = "bcs4",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 4,
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS4_RING_BASE,
	},
	[XE_HW_ENGINE_BCS5] = {
		.name = "bcs5",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 5,
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS5_RING_BASE,
	},
	[XE_HW_ENGINE_BCS6] = {
		.name = "bcs6",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 6,
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS6_RING_BASE,
	},
	[XE_HW_ENGINE_BCS7] = {
		.name = "bcs7",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 7,
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS7_RING_BASE,
	},
	[XE_HW_ENGINE_BCS8] = {
		.name = "bcs8",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 8,
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS8_RING_BASE,
	},

	[XE_HW_ENGINE_VCS0] = {
		.name = "vcs0",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 0,
		.domain = XE_FW_MEDIA_VDBOX0,
		.mmio_base = BSD_RING_BASE,
	},
	[XE_HW_ENGINE_VCS1] = {
		.name = "vcs1",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 1,
		.domain = XE_FW_MEDIA_VDBOX1,
		.mmio_base = BSD2_RING_BASE,
	},
	[XE_HW_ENGINE_VCS2] = {
		.name = "vcs2",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 2,
		.domain = XE_FW_MEDIA_VDBOX2,
		.mmio_base = BSD3_RING_BASE,
	},
	[XE_HW_ENGINE_VCS3] = {
		.name = "vcs3",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 3,
		.domain = XE_FW_MEDIA_VDBOX3,
		.mmio_base = BSD4_RING_BASE,
	},
	[XE_HW_ENGINE_VCS4] = {
		.name = "vcs4",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 4,
		.domain = XE_FW_MEDIA_VDBOX4,
		.mmio_base = XEHP_BSD5_RING_BASE,
	},
	[XE_HW_ENGINE_VCS5] = {
		.name = "vcs5",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 5,
		.domain = XE_FW_MEDIA_VDBOX5,
		.mmio_base = XEHP_BSD6_RING_BASE,
	},
	[XE_HW_ENGINE_VCS6] = {
		.name = "vcs6",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 6,
		.domain = XE_FW_MEDIA_VDBOX6,
		.mmio_base = XEHP_BSD7_RING_BASE,
	},
	[XE_HW_ENGINE_VCS7] = {
		.name = "vcs7",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 7,
		.domain = XE_FW_MEDIA_VDBOX7,
		.mmio_base = XEHP_BSD8_RING_BASE,
	},
	[XE_HW_ENGINE_VECS0] = {
		.name = "vecs0",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 0,
		.domain = XE_FW_MEDIA_VEBOX0,
		.mmio_base = VEBOX_RING_BASE,
	},
	[XE_HW_ENGINE_VECS1] = {
		.name = "vecs1",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 1,
		.domain = XE_FW_MEDIA_VEBOX1,
		.mmio_base = VEBOX2_RING_BASE,
	},
	[XE_HW_ENGINE_VECS2] = {
		.name = "vecs2",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 2,
		.domain = XE_FW_MEDIA_VEBOX2,
		.mmio_base = XEHP_VEBOX3_RING_BASE,
	},
	[XE_HW_ENGINE_VECS3] = {
		.name = "vecs3",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 3,
		.domain = XE_FW_MEDIA_VEBOX3,
		.mmio_base = XEHP_VEBOX4_RING_BASE,
	},
	[XE_HW_ENGINE_CCS0] = {
		.name = "ccs0",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 0,
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE0_RING_BASE,
	},
	[XE_HW_ENGINE_CCS1] = {
		.name = "ccs1",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 1,
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE1_RING_BASE,
	},
	[XE_HW_ENGINE_CCS2] = {
		.name = "ccs2",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 2,
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE2_RING_BASE,
	},
	[XE_HW_ENGINE_CCS3] = {
		.name = "ccs3",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 3,
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE3_RING_BASE,
	},
	[XE_HW_ENGINE_GSCCS0] = {
		.name = "gsccs0",
		.class = XE_ENGINE_CLASS_OTHER,
		.instance = OTHER_GSC_INSTANCE,
		.domain = XE_FW_GSC,
		.mmio_base = GSCCS_RING_BASE,
	},
};

static void hw_engine_fini(struct drm_device *drm, void *arg)
{
	struct xe_hw_engine *hwe = arg;

	if (hwe->exl_port)
		xe_execlist_port_destroy(hwe->exl_port);
	xe_lrc_finish(&hwe->kernel_lrc);

	hwe->gt = NULL;
}
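
/*
 * Engine-relative MMIO access: registers in xe_engine_regs.h are defined
 * relative to an engine's mmio_base, so the helpers below fold the base in
 * before touching the hardware. The asserts check that the register has not
 * already been offset and that the engine's forcewake domain is held.
 */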
static void hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg,
				   u32 val)
{
	xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);

	reg.addr += hwe->mmio_base;

	xe_mmio_write32(hwe->gt, reg, val);
}

static u32 hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
{
	xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);

	reg.addr += hwe->mmio_base;

	return xe_mmio_read32(hwe->gt, reg);
}
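
/*
 * Bring the ring to a runnable state: enable CCS support in RCU_MODE when
 * compute engines are present, program HWSTAM and the hardware status page
 * address, select the modern (non-legacy) command submission mode and clear
 * STOP_RING. The trailing register read acts as a posting read to flush the
 * preceding writes.
 */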
void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
{
	u32 ccs_mask =
		xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);

	if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
		xe_mmio_write32(hwe->gt, RCU_MODE,
				_MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));

	hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
	hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
			       xe_bo_ggtt_addr(hwe->hwsp));
	hw_engine_mmio_write32(hwe, RING_MODE(0),
			       _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
	hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
			       _MASKED_BIT_DISABLE(STOP_RING));
	hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
}

static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_gt *gt,
						 const struct xe_hw_engine *hwe)
{
	return xe_gt_ccs_mode_enabled(gt) &&
	       xe_rtp_match_first_render_or_compute(gt, hwe);
}
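
/*
 * Engine settings that must land in the default logical ring context rather
 * than be written through MMIO: the RTP rules below are collected into
 * hwe->reg_lrc, the save/restore list applied when logical ring contexts for
 * this engine are initialized.
 */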
static void
xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	const u8 mocs_write_idx = gt->mocs.uc_index;
	const u8 mocs_read_idx = gt->mocs.uc_index;
	u32 blit_cctl_val = REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, mocs_write_idx) |
			    REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, mocs_read_idx);
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
	const struct xe_rtp_entry_sr lrc_was[] = {
		/*
		 * Some blitter commands do not have a field for MOCS; those
		 * commands will use the MOCS index pointed to by BLIT_CCTL.
		 * The BLIT_CCTL registers need to be programmed to un-cached.
		 */
		{ XE_RTP_NAME("BLIT_CCTL_default_MOCS"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED),
			       ENGINE_CLASS(COPY)),
		  XE_RTP_ACTIONS(FIELD_SET(BLIT_CCTL(0),
					   BLIT_CCTL_DST_MOCS_MASK |
					   BLIT_CCTL_SRC_MOCS_MASK,
					   blit_cctl_val,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/* Use Fixed slice CCS mode */
		{ XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
		  XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
		  XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
					   RCU_MODE_FIXED_SLICE_CCS_MODE))
		},
		{}
	};

	xe_rtp_process_to_sr(&ctx, lrc_was, &hwe->reg_lrc);
}
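
/*
 * Default engine state programmed through the engine's save/restore list
 * (hwe->reg_sr): default MOCS indices for the command streamer, plus the
 * idle-messaging enable for the MTL GSCCS.
 */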
static void
hw_engine_setup_default_state(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	struct xe_device *xe = gt_to_xe(gt);
	/*
	 * RING_CMD_CCTL specifies the default MOCS entry that will be
	 * used by the command streamer when executing commands that
	 * don't have a way to explicitly specify a MOCS setting.
	 * The default should usually reference whichever MOCS entry
	 * corresponds to uncached behavior, although use of a WB cached
	 * entry is recommended by the spec in certain circumstances on
	 * specific platforms.
	 */
	const u8 mocs_write_idx = gt->mocs.uc_index;
	const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE &&
				 (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) ?
				 gt->mocs.wb_index : gt->mocs.uc_index;
	u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
				REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, mocs_read_idx);
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
	const struct xe_rtp_entry_sr engine_entries[] = {
		{ XE_RTP_NAME("RING_CMD_CCTL_default_MOCS"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED)),
		  XE_RTP_ACTIONS(FIELD_SET(RING_CMD_CCTL(0),
					   CMD_CCTL_WRITE_OVERRIDE_MASK |
					   CMD_CCTL_READ_OVERRIDE_MASK,
					   ring_cmd_cctl_val,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/*
		 * To allow the GSC engine to go idle on MTL we need to enable
		 * idle messaging and set the hysteresis value (we use 0xA=5us
		 * as recommended in spec). On platforms after MTL this is
		 * enabled by default.
		 */
		{ XE_RTP_NAME("MTL GSCCS IDLE MSG enable"),
		  XE_RTP_RULES(MEDIA_VERSION(1300), ENGINE_CLASS(OTHER)),
		  XE_RTP_ACTIONS(CLR(RING_PSMI_CTL(0),
				     IDLE_MSG_DISABLE,
				     XE_RTP_ACTION_FLAG(ENGINE_BASE)),
				 FIELD_SET(RING_PWRCTX_MAXCNT(0),
					   IDLE_WAIT_TIME,
					   0xA,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		{}
	};

	xe_rtp_process_to_sr(&ctx, engine_entries, &hwe->reg_sr);
}
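
/*
 * Early, software-only initialization: copy the static engine_info into the
 * xe_hw_engine, seed the per-class scheduling defaults, and build the tuning,
 * workaround and whitelist register lists. The lists are only applied to the
 * hardware later, in hw_engine_init().
 */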
static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
				 enum xe_hw_engine_id id)
{
	const struct engine_info *info;

	if (WARN_ON(id >= ARRAY_SIZE(engine_infos) || !engine_infos[id].name))
		return;

	if (!(gt->info.engine_mask & BIT(id)))
		return;

	info = &engine_infos[id];

	xe_gt_assert(gt, !hwe->gt);

	hwe->gt = gt;
	hwe->class = info->class;
	hwe->instance = info->instance;
	hwe->mmio_base = info->mmio_base;
	hwe->domain = info->domain;
	hwe->name = info->name;
	hwe->fence_irq = &gt->fence_irq[info->class];
	hwe->engine_id = id;

	hwe->eclass = &gt->eclass[hwe->class];
	if (!hwe->eclass->sched_props.job_timeout_ms) {
		hwe->eclass->sched_props.job_timeout_ms = 5 * 1000;
		hwe->eclass->sched_props.job_timeout_min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
		hwe->eclass->sched_props.job_timeout_max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
		hwe->eclass->sched_props.timeslice_us = 1 * 1000;
		hwe->eclass->sched_props.timeslice_min = XE_HW_ENGINE_TIMESLICE_MIN;
		hwe->eclass->sched_props.timeslice_max = XE_HW_ENGINE_TIMESLICE_MAX;
		hwe->eclass->sched_props.preempt_timeout_us = XE_HW_ENGINE_PREEMPT_TIMEOUT;
		hwe->eclass->sched_props.preempt_timeout_min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
		hwe->eclass->sched_props.preempt_timeout_max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
		/* Record default props */
		hwe->eclass->defaults = hwe->eclass->sched_props;
	}

	xe_reg_sr_init(&hwe->reg_sr, hwe->name, gt_to_xe(gt));
	xe_tuning_process_engine(hwe);
	xe_wa_process_engine(hwe);
	hw_engine_setup_default_state(hwe);

	xe_reg_sr_init(&hwe->reg_whitelist, hwe->name, gt_to_xe(gt));
	xe_reg_whitelist_process_engine(hwe);
}
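
/*
 * Second-stage initialization: apply the register lists built in
 * hw_engine_init_early(), allocate the hardware status page and kernel LRC,
 * and create an execlist port when running without GuC submission.
 */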
static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
			  enum xe_hw_engine_id id)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	int err;

	xe_gt_assert(gt, id < ARRAY_SIZE(engine_infos) && engine_infos[id].name);
	xe_gt_assert(gt, gt->info.engine_mask & BIT(id));

	xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
	xe_reg_sr_apply_whitelist(hwe);

	hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
						 XE_BO_CREATE_VRAM_IF_DGFX(tile) |
						 XE_BO_CREATE_GGTT_BIT);
	if (IS_ERR(hwe->hwsp)) {
		err = PTR_ERR(hwe->hwsp);
		goto err_name;
	}

	err = xe_lrc_init(&hwe->kernel_lrc, hwe, NULL, NULL, SZ_16K);
	if (err)
		goto err_hwsp;

	if (!xe_device_uc_enabled(xe)) {
		hwe->exl_port = xe_execlist_port_create(xe, hwe);
		if (IS_ERR(hwe->exl_port)) {
			err = PTR_ERR(hwe->exl_port);
			goto err_kernel_lrc;
		}
	}

	if (xe_device_uc_enabled(xe))
		xe_hw_engine_enable_ring(hwe);

	/* We reserve the highest BCS instance for USM */
	if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY)
		gt->usm.reserved_bcs_instance = hwe->instance;

	err = drmm_add_action_or_reset(&xe->drm, hw_engine_fini, hwe);
	if (err)
		return err;

	return 0;

err_kernel_lrc:
	xe_lrc_finish(&hwe->kernel_lrc);
err_hwsp:
	xe_bo_unpin_map_no_vm(hwe->hwsp);
err_name:
	hwe->name = NULL;

	return err;
}
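
/*
 * Logical instances number the enabled engines of each class contiguously,
 * so fused-off instances leave no holes in the logical numbering.
 */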
static void hw_engine_setup_logical_mapping(struct xe_gt *gt)
{
	int class;

	/* FIXME: Doing a simple logical mapping that works for most hardware */
	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		struct xe_hw_engine *hwe;
		enum xe_hw_engine_id id;
		int logical_instance = 0;

		for_each_hw_engine(hwe, gt, id)
			if (hwe->class == class)
				hwe->logical_instance = logical_instance++;
	}
}
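
/*
 * Fuse registers record which engine instances were disabled at manufacturing
 * time; the read_*_fuses() helpers prune those instances from
 * gt->info.engine_mask before any engine is initialized.
 */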
static void read_media_fuses(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 media_fuse;
	u16 vdbox_mask;
	u16 vebox_mask;
	int i, j;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	media_fuse = xe_mmio_read32(gt, GT_VEBOX_VDBOX_DISABLE);

	/*
	 * Pre-Xe_HP platforms had register bits representing absent engines,
	 * whereas Xe_HP and beyond have bits representing present engines.
	 * Invert the polarity on old platforms so that we can use common
	 * handling below.
	 */
	if (GRAPHICS_VERx100(xe) < 1250)
		media_fuse = ~media_fuse;

	vdbox_mask = REG_FIELD_GET(GT_VDBOX_DISABLE_MASK, media_fuse);
	vebox_mask = REG_FIELD_GET(GT_VEBOX_DISABLE_MASK, media_fuse);

	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j) & vdbox_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "vcs%u fused off\n", j);
		}
	}

	for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j) & vebox_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "vecs%u fused off\n", j);
		}
	}
}
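
/*
 * Copy-engine fusing only applies to the 12.60..12.6x (Xe_HPC) graphics IP
 * range handled here. Each MEML3 fuse bit gates a pair of BCS engines, hence
 * the j / 2 indexing below.
 */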
static void read_copy_fuses(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 bcs_mask;

	if (GRAPHICS_VERx100(xe) < 1260 || GRAPHICS_VERx100(xe) >= 1270)
		return;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	bcs_mask = xe_mmio_read32(gt, MIRROR_FUSE3);
	bcs_mask = REG_FIELD_GET(MEML3_EN_MASK, bcs_mask);

	/* BCS0 is always present; only BCS1-BCS8 may be fused off */
	for (int i = XE_HW_ENGINE_BCS1, j = 0; i <= XE_HW_ENGINE_BCS8; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j / 2) & bcs_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "bcs%u fused off\n", j);
		}
	}
}
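
/*
 * On platforms that can have more than one CCS, compute-engine availability
 * is derived from the DSS topology: a CCS is kept only if its quadrant of
 * the DSS mask has at least one enabled DSS.
 */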
static void read_compute_fuses_from_dss(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	/*
	 * CCS fusing based on DSS masks only applies to platforms that can
	 * have more than one CCS.
	 */
	if (hweight64(gt->info.engine_mask &
		      GENMASK_ULL(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)) <= 1)
		return;

	/*
	 * CCS availability on Xe_HP is inferred from the presence of DSS in
	 * each quadrant.
	 */
	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!xe_gt_topology_has_dss_in_quadrant(gt, j)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "ccs%u fused off\n", j);
		}
	}
}
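
/*
 * Xe2 and later report CCS enablement directly in a fuse register rather
 * than deriving it from the DSS topology.
 */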
static void read_compute_fuses_from_reg(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 ccs_mask;

	ccs_mask = xe_mmio_read32(gt, XEHP_FUSE4);
	ccs_mask = REG_FIELD_GET(CCS_EN_MASK, ccs_mask);

	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if ((ccs_mask & BIT(j)) == 0) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "ccs%u fused off\n", j);
		}
	}
}

static void read_compute_fuses(struct xe_gt *gt)
{
	if (GRAPHICS_VER(gt_to_xe(gt)) >= 20)
		read_compute_fuses_from_reg(gt);
	else
		read_compute_fuses_from_dss(gt);
}

static void check_gsc_availability(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
		return;

	/*
	 * The GSCCS is only used to communicate with the GSC FW, so if we don't
	 * have the FW there is nothing we need the engine for and can therefore
	 * skip its initialization.
	 */
	if (!xe_uc_fw_is_available(&gt->uc.gsc.fw)) {
		gt->info.engine_mask &= ~BIT(XE_HW_ENGINE_GSCCS0);
		drm_info(&xe->drm, "gsccs disabled due to lack of FW\n");
	}
}
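
/*
 * Process the engine fuses and sanity-check the preempt-timeout limits
 * before any per-engine state is set up, so that fused-off or unusable
 * engines are never initialized.
 */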
int xe_hw_engines_init_early(struct xe_gt *gt)
{
	int i;

	read_media_fuses(gt);
	read_copy_fuses(gt);
	read_compute_fuses(gt);
	check_gsc_availability(gt);

	BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT < XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN);
	BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT > XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX);

	for (i = 0; i < ARRAY_SIZE(gt->hw_engines); i++)
		hw_engine_init_early(gt, &gt->hw_engines[i], i);

	return 0;
}

int xe_hw_engines_init(struct xe_gt *gt)
{
	int err;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		err = hw_engine_init(gt, hwe, id);
		if (err)
			return err;
	}

	hw_engine_setup_logical_mapping(gt);

	return 0;
}

void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
{
	wake_up_all(&gt_to_xe(hwe->gt)->ufence_wq);

	if (hwe->irq_handler)
		hwe->irq_handler(hwe, intr_vec);

	if (intr_vec & GT_RENDER_USER_INTERRUPT)
		xe_hw_fence_irq_run(hwe->fence_irq);
}

/**
 * xe_hw_engine_snapshot_capture - Take a quick snapshot of the HW Engine.
 * @hwe: Xe HW Engine.
 *
 * This can be printed out in a later stage like during dev_coredump
 * analysis.
 *
 * Returns: a Xe HW Engine snapshot object that must be freed by the
 * caller, using `xe_hw_engine_snapshot_free`.
 */
struct xe_hw_engine_snapshot *
xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
{
	struct xe_hw_engine_snapshot *snapshot;
	size_t len;

	if (!xe_hw_engine_is_valid(hwe))
		return NULL;

	snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);

	if (!snapshot)
		return NULL;

	len = strlen(hwe->name) + 1;
	snapshot->name = kzalloc(len, GFP_ATOMIC);
	if (snapshot->name)
		strscpy(snapshot->name, hwe->name, len);

	snapshot->class = hwe->class;
	snapshot->logical_instance = hwe->logical_instance;
	snapshot->forcewake.domain = hwe->domain;
	snapshot->forcewake.ref = xe_force_wake_ref(gt_to_fw(hwe->gt),
						    hwe->domain);
	snapshot->mmio_base = hwe->mmio_base;

	snapshot->reg.ring_hwstam = hw_engine_mmio_read32(hwe, RING_HWSTAM(0));
	snapshot->reg.ring_hws_pga = hw_engine_mmio_read32(hwe,
							   RING_HWS_PGA(0));
	snapshot->reg.ring_execlist_status_lo =
		hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0));
	snapshot->reg.ring_execlist_status_hi =
		hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0));
	snapshot->reg.ring_execlist_sq_contents_lo =
		hw_engine_mmio_read32(hwe,
				      RING_EXECLIST_SQ_CONTENTS_LO(0));
	snapshot->reg.ring_execlist_sq_contents_hi =
		hw_engine_mmio_read32(hwe,
				      RING_EXECLIST_SQ_CONTENTS_HI(0));
	snapshot->reg.ring_start = hw_engine_mmio_read32(hwe, RING_START(0));
	snapshot->reg.ring_head =
		hw_engine_mmio_read32(hwe, RING_HEAD(0)) & HEAD_ADDR;
	snapshot->reg.ring_tail =
		hw_engine_mmio_read32(hwe, RING_TAIL(0)) & TAIL_ADDR;
	snapshot->reg.ring_ctl = hw_engine_mmio_read32(hwe, RING_CTL(0));
	snapshot->reg.ring_mi_mode =
		hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
	snapshot->reg.ring_mode = hw_engine_mmio_read32(hwe, RING_MODE(0));
	snapshot->reg.ring_imr = hw_engine_mmio_read32(hwe, RING_IMR(0));
	snapshot->reg.ring_esr = hw_engine_mmio_read32(hwe, RING_ESR(0));
	snapshot->reg.ring_emr = hw_engine_mmio_read32(hwe, RING_EMR(0));
	snapshot->reg.ring_eir = hw_engine_mmio_read32(hwe, RING_EIR(0));
	snapshot->reg.ring_acthd_udw =
		hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0));
	snapshot->reg.ring_acthd = hw_engine_mmio_read32(hwe, RING_ACTHD(0));
	snapshot->reg.ring_bbaddr_udw =
		hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0));
	snapshot->reg.ring_bbaddr = hw_engine_mmio_read32(hwe, RING_BBADDR(0));
	snapshot->reg.ring_dma_fadd_udw =
		hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0));
	snapshot->reg.ring_dma_fadd =
		hw_engine_mmio_read32(hwe, RING_DMA_FADD(0));
	snapshot->reg.ipehr = hw_engine_mmio_read32(hwe, RING_IPEHR(0));

	if (snapshot->class == XE_ENGINE_CLASS_COMPUTE)
		snapshot->reg.rcu_mode = xe_mmio_read32(hwe->gt, RCU_MODE);

	return snapshot;
}

/**
 * xe_hw_engine_snapshot_print - Print out a given Xe HW Engine snapshot.
 * @snapshot: Xe HW Engine snapshot object.
 * @p: drm_printer where it will be printed out.
 *
 * This function prints out a given Xe HW Engine snapshot object.
 */
void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot,
				 struct drm_printer *p)
{
	if (!snapshot)
		return;

	drm_printf(p, "%s (physical), logical instance=%d\n",
		   snapshot->name ? snapshot->name : "",
		   snapshot->logical_instance);
	drm_printf(p, "\tForcewake: domain 0x%x, ref %d\n",
		   snapshot->forcewake.domain, snapshot->forcewake.ref);
	drm_printf(p, "\tHWSTAM: 0x%08x\n", snapshot->reg.ring_hwstam);
	drm_printf(p, "\tRING_HWS_PGA: 0x%08x\n", snapshot->reg.ring_hws_pga);
	drm_printf(p, "\tRING_EXECLIST_STATUS_LO: 0x%08x\n",
		   snapshot->reg.ring_execlist_status_lo);
	drm_printf(p, "\tRING_EXECLIST_STATUS_HI: 0x%08x\n",
		   snapshot->reg.ring_execlist_status_hi);
	drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS_LO: 0x%08x\n",
		   snapshot->reg.ring_execlist_sq_contents_lo);
	drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS_HI: 0x%08x\n",
		   snapshot->reg.ring_execlist_sq_contents_hi);
	drm_printf(p, "\tRING_START: 0x%08x\n", snapshot->reg.ring_start);
	drm_printf(p, "\tRING_HEAD: 0x%08x\n", snapshot->reg.ring_head);
	drm_printf(p, "\tRING_TAIL: 0x%08x\n", snapshot->reg.ring_tail);
	drm_printf(p, "\tRING_CTL: 0x%08x\n", snapshot->reg.ring_ctl);
	drm_printf(p, "\tRING_MI_MODE: 0x%08x\n", snapshot->reg.ring_mi_mode);
	drm_printf(p, "\tRING_MODE: 0x%08x\n",
		   snapshot->reg.ring_mode);
	drm_printf(p, "\tRING_IMR: 0x%08x\n", snapshot->reg.ring_imr);
	drm_printf(p, "\tRING_ESR: 0x%08x\n", snapshot->reg.ring_esr);
	drm_printf(p, "\tRING_EMR: 0x%08x\n", snapshot->reg.ring_emr);
	drm_printf(p, "\tRING_EIR: 0x%08x\n", snapshot->reg.ring_eir);
	drm_printf(p, "\tACTHD: 0x%08x_%08x\n", snapshot->reg.ring_acthd_udw,
		   snapshot->reg.ring_acthd);
	drm_printf(p, "\tBBADDR: 0x%08x_%08x\n", snapshot->reg.ring_bbaddr_udw,
		   snapshot->reg.ring_bbaddr);
	drm_printf(p, "\tDMA_FADDR: 0x%08x_%08x\n",
		   snapshot->reg.ring_dma_fadd_udw,
		   snapshot->reg.ring_dma_fadd);
	drm_printf(p, "\tIPEHR: 0x%08x\n\n", snapshot->reg.ipehr);
	if (snapshot->class == XE_ENGINE_CLASS_COMPUTE)
		drm_printf(p, "\tRCU_MODE: 0x%08x\n",
			   snapshot->reg.rcu_mode);
}

/**
 * xe_hw_engine_snapshot_free - Free all allocated objects for a given snapshot.
 * @snapshot: Xe HW Engine snapshot object.
 *
 * This function frees all the memory that was allocated at capture time.
 */
void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot)
{
	if (!snapshot)
		return;

	kfree(snapshot->name);
	kfree(snapshot);
}

/**
 * xe_hw_engine_print - Xe HW Engine Print.
 * @hwe: Hardware Engine.
 * @p: drm_printer.
 *
 * This function quickly captures a snapshot and immediately prints it out.
 */
void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p)
{
	struct xe_hw_engine_snapshot *snapshot;

	snapshot = xe_hw_engine_snapshot_capture(hwe);
	xe_hw_engine_snapshot_print(snapshot, p);
	xe_hw_engine_snapshot_free(snapshot);
}

u32 xe_hw_engine_mask_per_class(struct xe_gt *gt,
				enum xe_engine_class engine_class)
{
	u32 mask = 0;
	enum xe_hw_engine_id id;

	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
		if (engine_infos[id].class == engine_class &&
		    gt->info.engine_mask & BIT(id))
			mask |= BIT(engine_infos[id].instance);
	}
	return mask;
}
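
/*
 * An engine is "reserved" when it exists in hardware but must not be exposed
 * for general submission: compute engines disabled by the current ccs_mode
 * setting and, on USM platforms, the copy engine set aside for USM.
 */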
bool xe_hw_engine_is_reserved(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	struct xe_device *xe = gt_to_xe(gt);

	if (hwe->class == XE_ENGINE_CLASS_OTHER)
		return false;

	/* Check for engines disabled by ccs_mode setting */
	if (xe_gt_ccs_mode_enabled(gt) &&
	    hwe->class == XE_ENGINE_CLASS_COMPUTE &&
	    hwe->logical_instance >= gt->ccs_mode)
		return true;

	return xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
		hwe->instance == gt->usm.reserved_bcs_instance;
}