// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_hw_engine.h"

#include <linux/nospec.h>

#include <drm/drm_managed.h>
#include <uapi/drm/xe_drm.h>

#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_irq_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_gsc.h"
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_printk.h"
#include "xe_gt_mcr.h"
#include "xe_gt_topology.h"
#include "xe_guc_capture.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
#include "xe_rtp.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_tuning.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"

#define MAX_MMIO_BASES 3

struct engine_info {
	const char *name;
	unsigned int class : 8;
	unsigned int instance : 8;
	unsigned int irq_offset : 8;
	enum xe_force_wake_domains domain;
	u32 mmio_base;
};

static const struct engine_info engine_infos[] = {
	[XE_HW_ENGINE_RCS0] = {
		.name = "rcs0",
		.class = XE_ENGINE_CLASS_RENDER,
		.instance = 0,
		.irq_offset = ilog2(INTR_RCS0),
		.domain = XE_FW_RENDER,
		.mmio_base = RENDER_RING_BASE,
	},
	[XE_HW_ENGINE_BCS0] = {
		.name = "bcs0",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 0,
		.irq_offset = ilog2(INTR_BCS(0)),
		.domain = XE_FW_RENDER,
		.mmio_base = BLT_RING_BASE,
	},
	[XE_HW_ENGINE_BCS1] = {
		.name = "bcs1",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 1,
		.irq_offset = ilog2(INTR_BCS(1)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS1_RING_BASE,
	},
	[XE_HW_ENGINE_BCS2] = {
		.name = "bcs2",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 2,
		.irq_offset = ilog2(INTR_BCS(2)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS2_RING_BASE,
	},
	[XE_HW_ENGINE_BCS3] = {
		.name = "bcs3",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 3,
		.irq_offset = ilog2(INTR_BCS(3)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS3_RING_BASE,
	},
	[XE_HW_ENGINE_BCS4] = {
		.name = "bcs4",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 4,
		.irq_offset = ilog2(INTR_BCS(4)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS4_RING_BASE,
	},
	[XE_HW_ENGINE_BCS5] = {
		.name = "bcs5",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 5,
		.irq_offset = ilog2(INTR_BCS(5)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS5_RING_BASE,
	},
	[XE_HW_ENGINE_BCS6] = {
		.name = "bcs6",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 6,
		.irq_offset = ilog2(INTR_BCS(6)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS6_RING_BASE,
	},
	[XE_HW_ENGINE_BCS7] = {
		.name = "bcs7",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 7,
		.irq_offset = ilog2(INTR_BCS(7)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS7_RING_BASE,
	},
	[XE_HW_ENGINE_BCS8] = {
		.name = "bcs8",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 8,
		.irq_offset = ilog2(INTR_BCS8),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS8_RING_BASE,
	},

	[XE_HW_ENGINE_VCS0] = {
		.name = "vcs0",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 0,
		.irq_offset = 32 + ilog2(INTR_VCS(0)),
		.domain = XE_FW_MEDIA_VDBOX0,
		.mmio_base = BSD_RING_BASE,
	},
	[XE_HW_ENGINE_VCS1] = {
		.name = "vcs1",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 1,
		.irq_offset = 32 + ilog2(INTR_VCS(1)),
		.domain = XE_FW_MEDIA_VDBOX1,
		.mmio_base = BSD2_RING_BASE,
	},
	[XE_HW_ENGINE_VCS2] = {
		.name = "vcs2",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 2,
		.irq_offset = 32 + ilog2(INTR_VCS(2)),
		.domain = XE_FW_MEDIA_VDBOX2,
		.mmio_base = BSD3_RING_BASE,
	},
	[XE_HW_ENGINE_VCS3] = {
		.name = "vcs3",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 3,
		.irq_offset = 32 + ilog2(INTR_VCS(3)),
		.domain = XE_FW_MEDIA_VDBOX3,
		.mmio_base = BSD4_RING_BASE,
	},
	[XE_HW_ENGINE_VCS4] = {
		.name = "vcs4",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 4,
		.irq_offset = 32 + ilog2(INTR_VCS(4)),
		.domain = XE_FW_MEDIA_VDBOX4,
		.mmio_base = XEHP_BSD5_RING_BASE,
	},
	[XE_HW_ENGINE_VCS5] = {
		.name = "vcs5",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 5,
		.irq_offset = 32 + ilog2(INTR_VCS(5)),
		.domain = XE_FW_MEDIA_VDBOX5,
		.mmio_base = XEHP_BSD6_RING_BASE,
	},
	[XE_HW_ENGINE_VCS6] = {
		.name = "vcs6",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 6,
		.irq_offset = 32 + ilog2(INTR_VCS(6)),
		.domain = XE_FW_MEDIA_VDBOX6,
		.mmio_base = XEHP_BSD7_RING_BASE,
	},
	[XE_HW_ENGINE_VCS7] = {
		.name = "vcs7",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 7,
		.irq_offset = 32 + ilog2(INTR_VCS(7)),
		.domain = XE_FW_MEDIA_VDBOX7,
		.mmio_base = XEHP_BSD8_RING_BASE,
	},
	[XE_HW_ENGINE_VECS0] = {
		.name = "vecs0",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 0,
		.irq_offset = 32 + ilog2(INTR_VECS(0)),
		.domain = XE_FW_MEDIA_VEBOX0,
		.mmio_base = VEBOX_RING_BASE,
	},
	[XE_HW_ENGINE_VECS1] = {
		.name = "vecs1",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 1,
		.irq_offset = 32 + ilog2(INTR_VECS(1)),
		.domain = XE_FW_MEDIA_VEBOX1,
		.mmio_base = VEBOX2_RING_BASE,
	},
	[XE_HW_ENGINE_VECS2] = {
		.name = "vecs2",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 2,
		.irq_offset = 32 + ilog2(INTR_VECS(2)),
		.domain = XE_FW_MEDIA_VEBOX2,
		.mmio_base = XEHP_VEBOX3_RING_BASE,
	},
	[XE_HW_ENGINE_VECS3] = {
		.name = "vecs3",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 3,
		.irq_offset = 32 + ilog2(INTR_VECS(3)),
		.domain = XE_FW_MEDIA_VEBOX3,
		.mmio_base = XEHP_VEBOX4_RING_BASE,
	},
	[XE_HW_ENGINE_CCS0] = {
		.name = "ccs0",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 0,
		.irq_offset = ilog2(INTR_CCS(0)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE0_RING_BASE,
	},
	[XE_HW_ENGINE_CCS1] = {
		.name = "ccs1",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 1,
		.irq_offset = ilog2(INTR_CCS(1)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE1_RING_BASE,
	},
	[XE_HW_ENGINE_CCS2] = {
		.name = "ccs2",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 2,
		.irq_offset = ilog2(INTR_CCS(2)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE2_RING_BASE,
	},
	[XE_HW_ENGINE_CCS3] = {
		.name = "ccs3",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 3,
		.irq_offset = ilog2(INTR_CCS(3)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE3_RING_BASE,
	},
	[XE_HW_ENGINE_GSCCS0] = {
		.name = "gsccs0",
		.class = XE_ENGINE_CLASS_OTHER,
		.instance = OTHER_GSC_INSTANCE,
		.domain = XE_FW_GSC,
		.mmio_base = GSCCS_RING_BASE,
	},
};

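/*
 * Engine teardown is registered as a devm action in hw_engine_init(), so it
 * runs automatically when the underlying device is released and does not
 * need an explicit unwind path in the GT code.
 */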
static void hw_engine_fini(void *arg)
{
	struct xe_hw_engine *hwe = arg;

	if (hwe->exl_port)
		xe_execlist_port_destroy(hwe->exl_port);

	hwe->gt = NULL;
}

/**
 * xe_hw_engine_mmio_write32() - Write engine register
 * @hwe: engine
 * @reg: register to write into
 * @val: desired 32-bit value to write
 *
 * This function will write val into an engine-specific register.
 * Forcewake must be held by the caller.
 */
void xe_hw_engine_mmio_write32(struct xe_hw_engine *hwe,
			       struct xe_reg reg, u32 val)
{
	xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);

	reg.addr += hwe->mmio_base;

	xe_mmio_write32(&hwe->gt->mmio, reg, val);
}

/**
 * xe_hw_engine_mmio_read32() - Read engine register
 * @hwe: engine
 * @reg: register to read from
 *
 * This function will read from an engine-specific register.
 * Forcewake must be held by the caller.
 *
 * Return: value of the 32-bit register.
 */
u32 xe_hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
{
	xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);

	reg.addr += hwe->mmio_base;

	return xe_mmio_read32(&hwe->gt->mmio, reg);
}

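/*
 * One-time ring setup for submission: write ~0 to RING_HWSTAM, point
 * RING_HWS_PGA at this engine's status page in the GGTT, take the command
 * streamer out of the legacy ringbuffer mode and clear STOP_RING. The final
 * RING_MI_MODE read presumably serves as a posting read for the preceding
 * masked-bit writes. Compute engines also need CCS enabled in RCU_MODE first.
 */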
void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
{
	u32 ccs_mask =
		xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);

	if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
		xe_mmio_write32(&hwe->gt->mmio, RCU_MODE,
				_MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));

	xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
	xe_hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
				  xe_bo_ggtt_addr(hwe->hwsp));
	xe_hw_engine_mmio_write32(hwe, RING_MODE(0),
				  _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
	xe_hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
				  _MASKED_BIT_DISABLE(STOP_RING));
	xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
}

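/*
 * RTP match helper: the fixed-slice CCS mode bit only needs to be programmed
 * once per GT, so match only the first render/compute engine, and only when
 * a fixed CCS mode has been configured.
 */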
static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_gt *gt,
						 const struct xe_hw_engine *hwe)
{
	return xe_gt_ccs_mode_enabled(gt) &&
	       xe_rtp_match_first_render_or_compute(gt, hwe);
}

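/*
 * RTP match helper for Xe2+ render/compute engines: returns true when the
 * CFEG fuse reports that WMTP (mid-thread preemption) is unavailable, so the
 * default LRC state must cap preemption at thread-group level instead.
 */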
static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_gt *gt,
				      const struct xe_hw_engine *hwe)
{
	if (GRAPHICS_VER(gt_to_xe(gt)) < 20)
		return false;

	if (hwe->class != XE_ENGINE_CLASS_COMPUTE &&
	    hwe->class != XE_ENGINE_CLASS_RENDER)
		return false;

	return xe_mmio_read32(&hwe->gt->mmio, XEHP_FUSE4) & CFEG_WMTP_DISABLE;
}

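/*
 * Entries collected here land in hwe->reg_lrc and are merged into the
 * default LRC image for contexts on this engine, as opposed to hwe->reg_sr
 * entries, which are applied to the MMIO registers themselves.
 */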
static void
xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	const u8 mocs_write_idx = gt->mocs.uc_index;
	const u8 mocs_read_idx = gt->mocs.uc_index;
	u32 blit_cctl_val = REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, mocs_write_idx) |
			    REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, mocs_read_idx);
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
	const struct xe_rtp_entry_sr lrc_setup[] = {
		/*
		 * Some blitter commands do not have a field for MOCS, those
		 * commands will use the MOCS index pointed to by BLIT_CCTL.
		 * The BLIT_CCTL registers need to be programmed to un-cached.
		 */
		{ XE_RTP_NAME("BLIT_CCTL_default_MOCS"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED),
			       ENGINE_CLASS(COPY)),
		  XE_RTP_ACTIONS(FIELD_SET(BLIT_CCTL(0),
					   BLIT_CCTL_DST_MOCS_MASK |
					   BLIT_CCTL_SRC_MOCS_MASK,
					   blit_cctl_val,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/* Use Fixed slice CCS mode */
		{ XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
		  XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
		  XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
					   RCU_MODE_FIXED_SLICE_CCS_MODE))
		},
		/* Disable WMTP if HW doesn't support it */
		{ XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"),
		  XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)),
		  XE_RTP_ACTIONS(FIELD_SET(CS_CHICKEN1(0),
					   PREEMPT_GPGPU_LEVEL_MASK,
					   PREEMPT_GPGPU_THREAD_GROUP_LEVEL)),
		  XE_RTP_ENTRY_FLAG(FOREACH_ENGINE)
		},
		{}
	};

	xe_rtp_process_to_sr(&ctx, lrc_setup, &hwe->reg_lrc);
}

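/*
 * Default MMIO state collected into hwe->reg_sr; xe_rtp_process_to_sr() only
 * records the matching entries here, the actual register writes happen later
 * in hw_engine_init() via xe_reg_sr_apply_mmio().
 */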
static void
hw_engine_setup_default_state(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	struct xe_device *xe = gt_to_xe(gt);
	/*
	 * RING_CMD_CCTL specifies the default MOCS entry that will be
	 * used by the command streamer when executing commands that
	 * don't have a way to explicitly specify a MOCS setting.
	 * The default should usually reference whichever MOCS entry
	 * corresponds to uncached behavior, although use of a WB cached
	 * entry is recommended by the spec in certain circumstances on
	 * specific platforms.
	 */
	const u8 mocs_write_idx = gt->mocs.uc_index;
	const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE &&
				 (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) ?
				 gt->mocs.wb_index : gt->mocs.uc_index;
	u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
				REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, mocs_read_idx);
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
	const struct xe_rtp_entry_sr engine_entries[] = {
		{ XE_RTP_NAME("RING_CMD_CCTL_default_MOCS"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED)),
		  XE_RTP_ACTIONS(FIELD_SET(RING_CMD_CCTL(0),
					   CMD_CCTL_WRITE_OVERRIDE_MASK |
					   CMD_CCTL_READ_OVERRIDE_MASK,
					   ring_cmd_cctl_val,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/*
		 * To allow the GSC engine to go idle on MTL we need to enable
		 * idle messaging and set the hysteresis value (we use 0xA=5us
		 * as recommended in spec). On platforms after MTL this is
		 * enabled by default.
		 */
		{ XE_RTP_NAME("MTL GSCCS IDLE MSG enable"),
		  XE_RTP_RULES(MEDIA_VERSION(1300), ENGINE_CLASS(OTHER)),
		  XE_RTP_ACTIONS(CLR(RING_PSMI_CTL(0),
				     IDLE_MSG_DISABLE,
				     XE_RTP_ACTION_FLAG(ENGINE_BASE)),
				 FIELD_SET(RING_PWRCTX_MAXCNT(0),
					   IDLE_WAIT_TIME,
					   0xA,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/* Enable Priority Mem Read */
		{ XE_RTP_NAME("Priority_Mem_Read"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
		  XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ,
				     XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		{}
	};

	xe_rtp_process_to_sr(&ctx, engine_entries, &hwe->reg_sr);
}

static const struct engine_info *find_engine_info(enum xe_engine_class class, int instance)
{
	const struct engine_info *info;
	enum xe_hw_engine_id id;

	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
		info = &engine_infos[id];
		if (info->class == class && info->instance == instance)
			return info;
	}

	return NULL;
}

static u16 get_msix_irq_offset(struct xe_gt *gt, enum xe_engine_class class)
{
	/* For MSI-X, hw engines report to offset of engine instance zero */
	const struct engine_info *info = find_engine_info(class, 0);

	xe_gt_assert(gt, info);

	return info ? info->irq_offset : 0;
}

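/*
 * Early per-engine setup, done before GuC and memory management are up: copy
 * the static engine_info into the xe_hw_engine, seed the per-class
 * scheduling properties on first use, and build the save/restore, tuning,
 * workaround and whitelist register lists for the engine.
 */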
static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
				 enum xe_hw_engine_id id)
{
	const struct engine_info *info;

	if (WARN_ON(id >= ARRAY_SIZE(engine_infos) || !engine_infos[id].name))
		return;

	if (!(gt->info.engine_mask & BIT(id)))
		return;

	info = &engine_infos[id];

	xe_gt_assert(gt, !hwe->gt);

	hwe->gt = gt;
	hwe->class = info->class;
	hwe->instance = info->instance;
	hwe->mmio_base = info->mmio_base;
	hwe->irq_offset = xe_device_has_msix(gt_to_xe(gt)) ?
		get_msix_irq_offset(gt, info->class) :
		info->irq_offset;
	hwe->domain = info->domain;
	hwe->name = info->name;
	hwe->fence_irq = &gt->fence_irq[info->class];
	hwe->engine_id = id;

	hwe->eclass = &gt->eclass[hwe->class];
	if (!hwe->eclass->sched_props.job_timeout_ms) {
		hwe->eclass->sched_props.job_timeout_ms = 5 * 1000;
		hwe->eclass->sched_props.job_timeout_min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
		hwe->eclass->sched_props.job_timeout_max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
		hwe->eclass->sched_props.timeslice_us = 1 * 1000;
		hwe->eclass->sched_props.timeslice_min = XE_HW_ENGINE_TIMESLICE_MIN;
		hwe->eclass->sched_props.timeslice_max = XE_HW_ENGINE_TIMESLICE_MAX;
		hwe->eclass->sched_props.preempt_timeout_us = XE_HW_ENGINE_PREEMPT_TIMEOUT;
		hwe->eclass->sched_props.preempt_timeout_min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
		hwe->eclass->sched_props.preempt_timeout_max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;

		/*
		 * The GSC engine can accept submissions while the GSC shim is
		 * being reset, during which time the submission is stalled. In
		 * the worst case, the shim reset can take up to the maximum GSC
		 * command execution time (250ms), so the request start can be
		 * delayed by that much; the request itself can take that long
		 * without being preemptible, which means worst case it can
		 * theoretically take up to 500ms for a preemption to go through
		 * on the GSC engine. Adding to that an extra 100ms as a safety
		 * margin, we get a minimum recommended timeout of 600ms.
		 * The preempt_timeout value can't be tuned for OTHER_CLASS
		 * because the class is reserved for kernel usage, so we just
		 * need to make sure that the starting value is above that
		 * threshold; since our default value (640ms) is greater than
		 * 600ms, the only way we can go below is via a kconfig setting.
		 * If that happens, log it in dmesg and update the value.
		 */
		if (hwe->class == XE_ENGINE_CLASS_OTHER) {
			const u32 min_preempt_timeout = 600 * 1000;

			if (hwe->eclass->sched_props.preempt_timeout_us < min_preempt_timeout) {
				hwe->eclass->sched_props.preempt_timeout_us = min_preempt_timeout;
				xe_gt_notice(gt, "Increasing preempt_timeout for GSC to 600ms\n");
			}
		}

		/* Record default props */
		hwe->eclass->defaults = hwe->eclass->sched_props;
	}

	xe_reg_sr_init(&hwe->reg_sr, hwe->name, gt_to_xe(gt));
	xe_tuning_process_engine(hwe);
	xe_wa_process_engine(hwe);
	hw_engine_setup_default_state(hwe);

	xe_reg_sr_init(&hwe->reg_whitelist, hwe->name, gt_to_xe(gt));
	xe_reg_whitelist_process_engine(hwe);
}

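/*
 * Late per-engine init: flush the save/restore and whitelist lists to the
 * hardware, allocate the hardware status page, and create an execlist port
 * for direct submission when GuC submission is not in use.
 */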
static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
			  enum xe_hw_engine_id id)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	int err;

	xe_gt_assert(gt, id < ARRAY_SIZE(engine_infos) && engine_infos[id].name);
	xe_gt_assert(gt, gt->info.engine_mask & BIT(id));

	xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
	xe_reg_sr_apply_whitelist(hwe);

	hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
						 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
						 XE_BO_FLAG_GGTT |
						 XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(hwe->hwsp)) {
		err = PTR_ERR(hwe->hwsp);
		goto err_name;
	}

	if (!xe_device_uc_enabled(xe)) {
		hwe->exl_port = xe_execlist_port_create(xe, hwe);
		if (IS_ERR(hwe->exl_port)) {
			err = PTR_ERR(hwe->exl_port);
			goto err_hwsp;
		}
	} else {
		/* GSCCS has a special interrupt for reset */
		if (hwe->class == XE_ENGINE_CLASS_OTHER)
			hwe->irq_handler = xe_gsc_hwe_irq_handler;

		if (!IS_SRIOV_VF(xe))
			xe_hw_engine_enable_ring(hwe);
	}

	/* We reserve the highest BCS instance for USM */
	if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY)
		gt->usm.reserved_bcs_instance = hwe->instance;

	return devm_add_action_or_reset(xe->drm.dev, hw_engine_fini, hwe);

err_hwsp:
	xe_bo_unpin_map_no_vm(hwe->hwsp);
err_name:
	hwe->name = NULL;

	return err;
}

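/*
 * Dense per-class renumbering of the engines that survived fusing, so the
 * enabled engines of each class form a contiguous 0..N-1 logical range.
 */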
static void hw_engine_setup_logical_mapping(struct xe_gt *gt)
{
	int class;

	/* FIXME: Doing a simple logical mapping that works for most hardware */
	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		struct xe_hw_engine *hwe;
		enum xe_hw_engine_id id;
		int logical_instance = 0;

		for_each_hw_engine(hwe, gt, id)
			if (hwe->class == class)
				hwe->logical_instance = logical_instance++;
	}
}

static void read_media_fuses(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 media_fuse;
	u16 vdbox_mask;
	u16 vebox_mask;
	int i, j;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	media_fuse = xe_mmio_read32(&gt->mmio, GT_VEBOX_VDBOX_DISABLE);

	/*
	 * Pre-Xe_HP platforms had register bits representing absent engines,
	 * whereas Xe_HP and beyond have bits representing present engines.
	 * Invert the polarity on old platforms so that we can use common
	 * handling below.
	 */
	if (GRAPHICS_VERx100(xe) < 1250)
		media_fuse = ~media_fuse;

	vdbox_mask = REG_FIELD_GET(GT_VDBOX_DISABLE_MASK, media_fuse);
	vebox_mask = REG_FIELD_GET(GT_VEBOX_DISABLE_MASK, media_fuse);

	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j) & vdbox_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "vcs%u fused off\n", j);
		}
	}

	for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j) & vebox_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "vecs%u fused off\n", j);
		}
	}
}

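/*
 * Extra copy engines can only be fused off on Xe_HPC (graphics version
 * 12.60..12.6x). The fuse granularity appears to be pairs of engines, hence
 * the BIT(j / 2) test against the MEML3 mask below.
 */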
static void read_copy_fuses(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 bcs_mask;

	if (GRAPHICS_VERx100(xe) < 1260 || GRAPHICS_VERx100(xe) >= 1270)
		return;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	bcs_mask = xe_mmio_read32(&gt->mmio, MIRROR_FUSE3);
	bcs_mask = REG_FIELD_GET(MEML3_EN_MASK, bcs_mask);

	/* BCS0 is always present; only BCS1-BCS8 may be fused off */
	for (int i = XE_HW_ENGINE_BCS1, j = 0; i <= XE_HW_ENGINE_BCS8; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j / 2) & bcs_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "bcs%u fused off\n", j);
		}
	}
}

static void read_compute_fuses_from_dss(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	/*
	 * CCS fusing based on DSS masks only applies to platforms that can
	 * have more than one CCS.
	 */
	if (hweight64(gt->info.engine_mask &
		      GENMASK_ULL(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)) <= 1)
		return;

	/*
	 * CCS availability on Xe_HP is inferred from the presence of DSS in
	 * each quadrant.
	 */
	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!xe_gt_topology_has_dss_in_quadrant(gt, j)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "ccs%u fused off\n", j);
		}
	}
}

static void read_compute_fuses_from_reg(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 ccs_mask;

	ccs_mask = xe_mmio_read32(&gt->mmio, XEHP_FUSE4);
	ccs_mask = REG_FIELD_GET(CCS_EN_MASK, ccs_mask);

	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if ((ccs_mask & BIT(j)) == 0) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "ccs%u fused off\n", j);
		}
	}
}

static void read_compute_fuses(struct xe_gt *gt)
{
	if (GRAPHICS_VER(gt_to_xe(gt)) >= 20)
		read_compute_fuses_from_reg(gt);
	else
		read_compute_fuses_from_dss(gt);
}

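/*
 * The GSCCS only exists to talk to the GSC firmware: without usable firmware
 * the engine is dropped from the engine mask and the interrupts that IRQ
 * init has already enabled for it are turned back off.
 */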
static void check_gsc_availability(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
		return;

	/*
	 * The GSCCS is only used to communicate with the GSC FW, so if we don't
	 * have the FW there is nothing we need the engine for and can therefore
	 * skip its initialization.
	 */
	if (!xe_uc_fw_is_available(&gt->uc.gsc.fw)) {
		gt->info.engine_mask &= ~BIT(XE_HW_ENGINE_GSCCS0);

		/* interrupts were previously enabled, so turn them off */
		xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_ENABLE, 0);
		xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_MASK, ~0);

		drm_info(&xe->drm, "gsccs disabled due to lack of FW\n");
	}
}

int xe_hw_engines_init_early(struct xe_gt *gt)
{
	int i;

	read_media_fuses(gt);
	read_copy_fuses(gt);
	read_compute_fuses(gt);
	check_gsc_availability(gt);

	BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT < XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN);
	BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT > XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX);

	for (i = 0; i < ARRAY_SIZE(gt->hw_engines); i++)
		hw_engine_init_early(gt, &gt->hw_engines[i], i);

	return 0;
}

int xe_hw_engines_init(struct xe_gt *gt)
{
	int err;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		err = hw_engine_init(gt, hwe, id);
		if (err)
			return err;
	}

	hw_engine_setup_logical_mapping(gt);
	err = xe_hw_engine_setup_groups(gt);
	if (err)
		return err;

	return 0;
}

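/*
 * Per-engine interrupt dispatch: user-fence waiters are woken on any engine
 * interrupt, then the engine-specific handler (e.g. the GSC one) and the
 * fence IRQ processing run as indicated by the vector bits.
 */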
void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
{
	wake_up_all(&gt_to_xe(hwe->gt)->ufence_wq);

	if (hwe->irq_handler)
		hwe->irq_handler(hwe, intr_vec);

	if (intr_vec & GT_RENDER_USER_INTERRUPT)
		xe_hw_fence_irq_run(hwe->fence_irq);
}

/**
 * xe_hw_engine_snapshot_capture - Take a quick snapshot of the HW Engine.
 * @hwe: Xe HW Engine.
 * @job: The job object.
 *
 * This can be printed out in a later stage like during dev_coredump
 * analysis.
 *
 * Returns: a Xe HW Engine snapshot object that must be freed by the
 * caller, using `xe_hw_engine_snapshot_free`.
 */
struct xe_hw_engine_snapshot *
xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_sched_job *job)
{
	struct xe_hw_engine_snapshot *snapshot;
	struct __guc_capture_parsed_output *node;

	if (!xe_hw_engine_is_valid(hwe))
		return NULL;

	snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);

	if (!snapshot)
		return NULL;

	snapshot->name = kstrdup(hwe->name, GFP_ATOMIC);
	snapshot->hwe = hwe;
	snapshot->logical_instance = hwe->logical_instance;
	snapshot->forcewake.domain = hwe->domain;
	snapshot->forcewake.ref = xe_force_wake_ref(gt_to_fw(hwe->gt),
						    hwe->domain);
	snapshot->mmio_base = hwe->mmio_base;
	snapshot->kernel_reserved = xe_hw_engine_is_reserved(hwe);

	/* no more VF accessible data below this point */
	if (IS_SRIOV_VF(gt_to_xe(hwe->gt)))
		return snapshot;

	/* If got guc capture, set source to GuC */
	node = xe_guc_capture_get_matching_and_lock(job);
	if (node) {
		struct xe_device *xe = gt_to_xe(hwe->gt);
		struct xe_devcoredump *coredump = &xe->devcoredump;

		coredump->snapshot.matched_node = node;
		snapshot->source = XE_ENGINE_CAPTURE_SOURCE_GUC;
		xe_gt_dbg(hwe->gt, "Found and locked GuC-err-capture node");
		return snapshot;
	}

	/* otherwise, do manual capture */
	xe_engine_manual_capture(hwe, snapshot);
	snapshot->source = XE_ENGINE_CAPTURE_SOURCE_MANUAL;
	xe_gt_dbg(hwe->gt, "Proceeding with manual engine snapshot");

	return snapshot;
}

/**
 * xe_hw_engine_snapshot_free - Free all allocated objects for a given snapshot.
 * @snapshot: Xe HW Engine snapshot object.
 *
 * This function frees all the memory that was allocated at capture time.
 */
void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot)
{
	struct xe_gt *gt;

	if (!snapshot)
		return;

	gt = snapshot->hwe->gt;
	/*
	 * xe_guc_capture_put_matched_nodes is called here and from
	 * xe_devcoredump_snapshot_free, to cover the 2 calling paths
	 * of hw_engines - debugfs and devcoredump free.
	 */
	xe_guc_capture_put_matched_nodes(&gt->uc.guc);

	kfree(snapshot->name);
	kfree(snapshot);
}

/**
 * xe_hw_engine_print - Xe HW Engine Print.
 * @hwe: Hardware Engine.
 * @p: drm_printer.
 *
 * This function quickly captures a snapshot and immediately prints it out.
 */
void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p)
{
	struct xe_hw_engine_snapshot *snapshot;

	snapshot = xe_hw_engine_snapshot_capture(hwe, NULL);
	xe_engine_snapshot_print(snapshot, p);
	xe_hw_engine_snapshot_free(snapshot);
}

u32 xe_hw_engine_mask_per_class(struct xe_gt *gt,
				enum xe_engine_class engine_class)
{
	u32 mask = 0;
	enum xe_hw_engine_id id;

	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
		if (engine_infos[id].class == engine_class &&
		    gt->info.engine_mask & BIT(id))
			mask |= BIT(engine_infos[id].instance);
	}
	return mask;
}

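/*
 * "Reserved" engines exist in the engine mask but must not be exposed for
 * direct user submission: the kernel-only GSCCS, compute engines disabled by
 * the current ccs_mode, and the highest BCS instance on USM platforms.
 */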
bool xe_hw_engine_is_reserved(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	struct xe_device *xe = gt_to_xe(gt);

	if (hwe->class == XE_ENGINE_CLASS_OTHER)
		return true;

	/* Check for engines disabled by ccs_mode setting */
	if (xe_gt_ccs_mode_enabled(gt) &&
	    hwe->class == XE_ENGINE_CLASS_COMPUTE &&
	    hwe->logical_instance >= gt->ccs_mode)
		return true;

	return xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
		hwe->instance == gt->usm.reserved_bcs_instance;
}

const char *xe_hw_engine_class_to_str(enum xe_engine_class class)
{
	switch (class) {
	case XE_ENGINE_CLASS_RENDER:
		return "rcs";
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		return "vcs";
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		return "vecs";
	case XE_ENGINE_CLASS_COPY:
		return "bcs";
	case XE_ENGINE_CLASS_OTHER:
		return "other";
	case XE_ENGINE_CLASS_COMPUTE:
		return "ccs";
	case XE_ENGINE_CLASS_MAX:
		break;
	}

	return NULL;
}

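/*
 * RING_TIMESTAMP is a free-running counter exposed as two 32-bit registers;
 * xe_mmio_read64_2x32() reads both halves with a retry loop so the combined
 * 64-bit value is consistent.
 */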
u64 xe_hw_engine_read_timestamp(struct xe_hw_engine *hwe)
{
	return xe_mmio_read64_2x32(&hwe->gt->mmio, RING_TIMESTAMP(hwe->mmio_base));
}

enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe)
{
	return engine_infos[hwe->engine_id].domain;
}

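/* Map uAPI engine class values from xe_drm.h onto the internal classes. */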
static const enum xe_engine_class user_to_xe_engine_class[] = {
	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

/**
 * xe_hw_engine_lookup() - Lookup hardware engine for class:instance
 * @xe: xe device
 * @eci: engine class and instance
 *
 * This function will find a hardware engine for the given engine
 * class and instance.
 *
 * Return: the xe_hw_engine pointer if found, NULL otherwise.
 */
struct xe_hw_engine *
xe_hw_engine_lookup(struct xe_device *xe,
		    struct drm_xe_engine_class_instance eci)
{
	unsigned int idx;

	if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
		return NULL;

	if (eci.gt_id >= xe->info.gt_count)
		return NULL;

	idx = array_index_nospec(eci.engine_class,
				 ARRAY_SIZE(user_to_xe_engine_class));

	return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
			       user_to_xe_engine_class[idx],
			       eci.engine_instance, true);
}