// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021-2022 Intel Corporation
 */

#include <linux/types.h>

#include <drm/drm_print.h>

#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "guc_capture_fwif.h"
#include "intel_guc_capture.h"
#include "intel_guc_fwif.h"
#include "intel_guc_print.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "i915_reg.h"

/*
 * Define all device tables of GuC error capture register lists
 * NOTE: For engine-registers, GuC only needs the register offsets
 *       from the engine-mmio-base
 */

#define COMMON_BASE_GLOBAL \
	{ FORCEWAKE_MT, 0, 0, "FORCEWAKE" }

#define COMMON_GEN8BASE_GLOBAL \
	{ ERROR_GEN6, 0, 0, "ERROR_GEN6" }, \
	{ DONE_REG, 0, 0, "DONE_REG" }, \
	{ HSW_GTT_CACHE_EN, 0, 0, "HSW_GTT_CACHE_EN" }

#define GEN9_GLOBAL \
	{ GEN8_FAULT_TLB_DATA0, 0, 0, "GEN8_FAULT_TLB_DATA0" }, \
	{ GEN8_FAULT_TLB_DATA1, 0, 0, "GEN8_FAULT_TLB_DATA1" }

#define COMMON_GEN12BASE_GLOBAL \
	{ GEN12_FAULT_TLB_DATA0, 0, 0, "GEN12_FAULT_TLB_DATA0" }, \
	{ GEN12_FAULT_TLB_DATA1, 0, 0, "GEN12_FAULT_TLB_DATA1" }, \
	{ GEN12_AUX_ERR_DBG, 0, 0, "AUX_ERR_DBG" }, \
	{ GEN12_GAM_DONE, 0, 0, "GAM_DONE" }, \
	{ GEN12_RING_FAULT_REG, 0, 0, "FAULT_REG" }

#define COMMON_BASE_ENGINE_INSTANCE \
	{ RING_PSMI_CTL(0), 0, 0, "RC PSMI" }, \
	{ RING_ESR(0), 0, 0, "ESR" }, \
	{ RING_DMA_FADD(0), 0, 0, "RING_DMA_FADD_LDW" }, \
	{ RING_DMA_FADD_UDW(0), 0, 0, "RING_DMA_FADD_UDW" }, \
	{ RING_IPEIR(0), 0, 0, "IPEIR" }, \
	{ RING_IPEHR(0), 0, 0, "IPEHR" }, \
	{ RING_INSTPS(0), 0, 0, "INSTPS" }, \
	{ RING_BBADDR(0), 0, 0, "RING_BBADDR_LOW32" }, \
	{ RING_BBADDR_UDW(0), 0, 0, "RING_BBADDR_UP32" }, \
	{ RING_BBSTATE(0), 0, 0, "BB_STATE" }, \
	{ CCID(0), 0, 0, "CCID" }, \
	{ RING_ACTHD(0), 0, 0, "ACTHD_LDW" }, \
	{ RING_ACTHD_UDW(0), 0, 0, "ACTHD_UDW" }, \
	{ RING_INSTPM(0), 0, 0, "INSTPM" }, \
	{ RING_INSTDONE(0), 0, 0, "INSTDONE" }, \
	{ RING_NOPID(0), 0, 0, "RING_NOPID" }, \
	{ RING_START(0), 0, 0, "START" }, \
	{ RING_HEAD(0), 0, 0, "HEAD" }, \
	{ RING_TAIL(0), 0, 0, "TAIL" }, \
	{ RING_CTL(0), 0, 0, "CTL" }, \
	{ RING_MI_MODE(0), 0, 0, "MODE" }, \
	{ RING_CONTEXT_CONTROL(0), 0, 0, "RING_CONTEXT_CONTROL" }, \
	{ RING_HWS_PGA(0), 0, 0, "HWS" }, \
	{ RING_MODE_GEN7(0), 0, 0, "GFX_MODE" }, \
	{ GEN8_RING_PDP_LDW(0, 0), 0, 0, "PDP0_LDW" }, \
	{ GEN8_RING_PDP_UDW(0, 0), 0, 0, "PDP0_UDW" }, \
	{ GEN8_RING_PDP_LDW(0, 1), 0, 0, "PDP1_LDW" }, \
	{ GEN8_RING_PDP_UDW(0, 1), 0, 0, "PDP1_UDW" }, \
	{ GEN8_RING_PDP_LDW(0, 2), 0, 0, "PDP2_LDW" }, \
	{ GEN8_RING_PDP_UDW(0, 2), 0, 0, "PDP2_UDW" }, \
	{ GEN8_RING_PDP_LDW(0, 3), 0, 0, "PDP3_LDW" }, \
	{ GEN8_RING_PDP_UDW(0, 3), 0, 0, "PDP3_UDW" }

#define COMMON_BASE_HAS_EU \
	{ EIR, 0, 0, "EIR" }

#define COMMON_BASE_RENDER \
	{ GEN7_SC_INSTDONE, 0, 0, "GEN7_SC_INSTDONE" }

#define COMMON_GEN12BASE_RENDER \
	{ GEN12_SC_INSTDONE_EXTRA, 0, 0, "GEN12_SC_INSTDONE_EXTRA" }, \
	{ GEN12_SC_INSTDONE_EXTRA2, 0, 0, "GEN12_SC_INSTDONE_EXTRA2" }

#define COMMON_GEN12BASE_VEC \
	{ GEN12_SFC_DONE(0), 0, 0, "SFC_DONE[0]" }, \
	{ GEN12_SFC_DONE(1), 0, 0, "SFC_DONE[1]" }, \
	{ GEN12_SFC_DONE(2), 0, 0, "SFC_DONE[2]" }, \
	{ GEN12_SFC_DONE(3), 0, 0, "SFC_DONE[3]" }

/* XE_LP Global */
static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = {
	COMMON_BASE_GLOBAL,
	COMMON_GEN8BASE_GLOBAL,
	COMMON_GEN12BASE_GLOBAL,
};

/* XE_LP Render / Compute Per-Class */
static const struct __guc_mmio_reg_descr xe_lp_rc_class_regs[] = {
	COMMON_BASE_HAS_EU,
	COMMON_BASE_RENDER,
	COMMON_GEN12BASE_RENDER,
};

/* GEN8+ Render / Compute Per-Engine-Instance */
static const struct __guc_mmio_reg_descr gen8_rc_inst_regs[] = {
	COMMON_BASE_ENGINE_INSTANCE,
};

/* GEN8+ Media Decode/Encode Per-Engine-Instance */
static const struct __guc_mmio_reg_descr gen8_vd_inst_regs[] = {
	COMMON_BASE_ENGINE_INSTANCE,
};

/* XE_LP Video Enhancement Per-Class */
static const struct __guc_mmio_reg_descr xe_lp_vec_class_regs[] = {
	COMMON_GEN12BASE_VEC,
};

/* GEN8+ Video Enhancement Per-Engine-Instance */
static const struct __guc_mmio_reg_descr gen8_vec_inst_regs[] = {
	COMMON_BASE_ENGINE_INSTANCE,
};

/* GEN8+ Blitter Per-Engine-Instance */
static const struct __guc_mmio_reg_descr gen8_blt_inst_regs[] = {
	COMMON_BASE_ENGINE_INSTANCE,
};

/* XE_LP - GSC Per-Engine-Instance */
static const struct __guc_mmio_reg_descr xe_lp_gsc_inst_regs[] = {
	COMMON_BASE_ENGINE_INSTANCE,
};

/* GEN8 - Global */
static const struct __guc_mmio_reg_descr gen8_global_regs[] = {
	COMMON_BASE_GLOBAL,
	COMMON_GEN8BASE_GLOBAL,
	GEN9_GLOBAL,
};

static const struct __guc_mmio_reg_descr gen8_rc_class_regs[] = {
	COMMON_BASE_HAS_EU,
	COMMON_BASE_RENDER,
};

/*
 * Empty list to prevent warnings about unknown class/instance types
 * as not all class/instance types have entries on all platforms.
 */
static const struct __guc_mmio_reg_descr empty_regs_list[] = {
};

#define TO_GCAP_DEF_OWNER(x) (GUC_CAPTURE_LIST_INDEX_##x)
#define TO_GCAP_DEF_TYPE(x) (GUC_CAPTURE_LIST_TYPE_##x)
#define MAKE_REGLIST(regslist, regsowner, regstype, class) \
	{ \
		regslist, \
		ARRAY_SIZE(regslist), \
		TO_GCAP_DEF_OWNER(regsowner), \
		TO_GCAP_DEF_TYPE(regstype), \
		class, \
		NULL, \
	}

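/*
 * For illustration only (not part of the driver): with the definitions
 * above, MAKE_REGLIST(gen8_global_regs, PF, GLOBAL, 0) expands to roughly:
 *
 *	{
 *		gen8_global_regs,
 *		ARRAY_SIZE(gen8_global_regs),
 *		GUC_CAPTURE_LIST_INDEX_PF,
 *		GUC_CAPTURE_LIST_TYPE_GLOBAL,
 *		0,
 *		NULL,
 *	}
 *
 * i.e. one __guc_mmio_reg_descr_group wrapping a static register list, with
 * the extlist left NULL until steered registers are allocated at init time.
 */
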
/* List of lists */
static const struct __guc_mmio_reg_descr_group gen8_lists[] = {
	MAKE_REGLIST(gen8_global_regs, PF, GLOBAL, 0),
	MAKE_REGLIST(gen8_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
	MAKE_REGLIST(gen8_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO),
	MAKE_REGLIST(gen8_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO),
	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
	MAKE_REGLIST(gen8_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER),
	MAKE_REGLIST(gen8_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER),
	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
	MAKE_REGLIST(empty_regs_list, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
	{}
};

static const struct __guc_mmio_reg_descr_group xe_lp_lists[] = {
	MAKE_REGLIST(xe_lp_global_regs, PF, GLOBAL, 0),
	MAKE_REGLIST(xe_lp_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
	MAKE_REGLIST(gen8_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO),
	MAKE_REGLIST(gen8_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO),
	MAKE_REGLIST(xe_lp_vec_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
	MAKE_REGLIST(gen8_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER),
	MAKE_REGLIST(gen8_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER),
	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
	MAKE_REGLIST(xe_lp_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
	{}
};

static const struct __guc_mmio_reg_descr_group *
guc_capture_get_one_list(const struct __guc_mmio_reg_descr_group *reglists,
			 u32 owner, u32 type, u32 id)
{
	int i;

	if (!reglists)
		return NULL;

	for (i = 0; reglists[i].list; ++i) {
		if (reglists[i].owner == owner && reglists[i].type == type &&
		    (reglists[i].engine == id || reglists[i].type == GUC_CAPTURE_LIST_TYPE_GLOBAL))
			return &reglists[i];
	}

	return NULL;
}

static struct __guc_mmio_reg_descr_group *
guc_capture_get_one_ext_list(struct __guc_mmio_reg_descr_group *reglists,
			     u32 owner, u32 type, u32 id)
{
	int i;

	if (!reglists)
		return NULL;

	for (i = 0; reglists[i].extlist; ++i) {
		if (reglists[i].owner == owner && reglists[i].type == type &&
		    (reglists[i].engine == id || reglists[i].type == GUC_CAPTURE_LIST_TYPE_GLOBAL))
			return &reglists[i];
	}

	return NULL;
}

static void guc_capture_free_extlists(struct __guc_mmio_reg_descr_group *reglists)
{
	int i = 0;

	if (!reglists)
		return;

	while (reglists[i].extlist)
		kfree(reglists[i++].extlist);
}

struct __ext_steer_reg {
	const char *name;
	i915_mcr_reg_t reg;
};

static const struct __ext_steer_reg gen8_extregs[] = {
	{"GEN8_SAMPLER_INSTDONE", GEN8_SAMPLER_INSTDONE},
	{"GEN8_ROW_INSTDONE", GEN8_ROW_INSTDONE}
};

static const struct __ext_steer_reg xehpg_extregs[] = {
	{"XEHPG_INSTDONE_GEOM_SVG", XEHPG_INSTDONE_GEOM_SVG}
};

static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
			   const struct __ext_steer_reg *extlist,
			   int slice_id, int subslice_id)
{
	ext->reg = _MMIO(i915_mmio_reg_offset(extlist->reg));
	ext->flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
	ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);
	ext->regname = extlist->name;
}

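/*
 * Worked example (illustrative values): for slice_id = 1, subslice_id = 2,
 * __fill_ext_reg() encodes the steering target into the descriptor flags as
 *
 *	flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, 1) |
 *		FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, 2);
 *
 * so GuC steers its read of the multicast register to group 1 / instance 2
 * before dumping the value.
 */
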
static int
__alloc_ext_regs(struct __guc_mmio_reg_descr_group *newlist,
		 const struct __guc_mmio_reg_descr_group *rootlist, int num_regs)
{
	struct __guc_mmio_reg_descr *list;

	list = kcalloc(num_regs, sizeof(struct __guc_mmio_reg_descr), GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	newlist->extlist = list;
	newlist->num_regs = num_regs;
	newlist->owner = rootlist->owner;
	newlist->engine = rootlist->engine;
	newlist->type = rootlist->type;

	return 0;
}

static void
guc_capture_alloc_steered_lists(struct intel_guc *guc,
				const struct __guc_mmio_reg_descr_group *lists)
{
	struct intel_gt *gt = guc_to_gt(guc);
	int slice, subslice, iter, i, num_steer_regs, num_tot_regs = 0;
	const struct __guc_mmio_reg_descr_group *list;
	struct __guc_mmio_reg_descr_group *extlists;
	struct __guc_mmio_reg_descr *extarray;
	bool has_xehpg_extregs;

	/* steered registers currently only exist for the render-class */
	list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF,
					GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
					GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE);
	/* skip if extlists was previously allocated */
	if (!list || guc->capture->extlists)
		return;

	has_xehpg_extregs = GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55);

	num_steer_regs = ARRAY_SIZE(gen8_extregs);
	if (has_xehpg_extregs)
		num_steer_regs += ARRAY_SIZE(xehpg_extregs);

	for_each_ss_steering(iter, gt, slice, subslice)
		num_tot_regs += num_steer_regs;

	if (!num_tot_regs)
		return;

	/* allocate an extra for an end marker */
	extlists = kcalloc(2, sizeof(struct __guc_mmio_reg_descr_group), GFP_KERNEL);
	if (!extlists)
		return;

	if (__alloc_ext_regs(&extlists[0], list, num_tot_regs)) {
		kfree(extlists);
		return;
	}

	extarray = extlists[0].extlist;
	for_each_ss_steering(iter, gt, slice, subslice) {
		for (i = 0; i < ARRAY_SIZE(gen8_extregs); ++i) {
			__fill_ext_reg(extarray, &gen8_extregs[i], slice, subslice);
			++extarray;
		}

		if (has_xehpg_extregs) {
			for (i = 0; i < ARRAY_SIZE(xehpg_extregs); ++i) {
				__fill_ext_reg(extarray, &xehpg_extregs[i], slice, subslice);
				++extarray;
			}
		}
	}

	guc_dbg(guc, "capture found %d ext-regs.\n", num_tot_regs);
	guc->capture->extlists = extlists;
}

static const struct __guc_mmio_reg_descr_group *
guc_capture_get_device_reglist(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_i915(guc);
	const struct __guc_mmio_reg_descr_group *lists;

	if (GRAPHICS_VER(i915) >= 12)
		lists = xe_lp_lists;
	else
		lists = gen8_lists;

	/*
	 * For certain engine classes, there are slice and subslice
	 * level registers requiring steering. We allocate and populate
	 * these at init time based on hw config and add them as an
	 * extension list at the end of the pre-populated render list.
	 */
	guc_capture_alloc_steered_lists(guc, lists);

	return lists;
}

static const char *
__stringify_type(u32 type)
{
	switch (type) {
	case GUC_CAPTURE_LIST_TYPE_GLOBAL:
		return "Global";
	case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
		return "Class";
	case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
		return "Instance";
	default:
		break;
	}

	return "unknown";
}

static const char *
__stringify_engclass(u32 class)
{
	switch (class) {
	case GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE:
		return "Render/Compute";
	case GUC_CAPTURE_LIST_CLASS_VIDEO:
		return "Video";
	case GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE:
		return "VideoEnhance";
	case GUC_CAPTURE_LIST_CLASS_BLITTER:
		return "Blitter";
	case GUC_CAPTURE_LIST_CLASS_GSC_OTHER:
		return "GSC-Other";
	default:
		break;
	}

	return "unknown";
}

static int
guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
		      struct guc_mmio_reg *ptr, u16 num_entries)
{
	u32 i = 0, j = 0;
	const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
	struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists;
	const struct __guc_mmio_reg_descr_group *match;
	struct __guc_mmio_reg_descr_group *matchext;

	if (!reglists)
		return -ENODEV;

	match = guc_capture_get_one_list(reglists, owner, type, classid);
	if (!match)
		return -ENODATA;

	for (i = 0; i < num_entries && i < match->num_regs; ++i) {
		ptr[i].offset = match->list[i].reg.reg;
		ptr[i].value = 0xDEADF00D;
		ptr[i].flags = match->list[i].flags;
		ptr[i].mask = match->list[i].mask;
	}

	matchext = guc_capture_get_one_ext_list(extlists, owner, type, classid);
	if (matchext) {
		for (i = match->num_regs, j = 0; i < num_entries &&
		     i < (match->num_regs + matchext->num_regs) &&
		     j < matchext->num_regs; ++i, ++j) {
			ptr[i].offset = matchext->extlist[j].reg.reg;
			ptr[i].value = 0xDEADF00D;
			ptr[i].flags = matchext->extlist[j].flags;
			ptr[i].mask = matchext->extlist[j].mask;
		}
	}
	if (i < num_entries)
		guc_dbg(guc, "Got short capture reglist init: %d out of %d.\n", i, num_entries);

	return 0;
}

static int
guc_cap_list_num_regs(struct intel_guc_state_capture *gc, u32 owner, u32 type, u32 classid)
{
	const struct __guc_mmio_reg_descr_group *match;
	struct __guc_mmio_reg_descr_group *matchext;
	int num_regs;

	match = guc_capture_get_one_list(gc->reglists, owner, type, classid);
	if (!match)
		return 0;

	num_regs = match->num_regs;

	matchext = guc_capture_get_one_ext_list(gc->extlists, owner, type, classid);
	if (matchext)
		num_regs += matchext->num_regs;

	return num_regs;
}

static int
guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
			size_t *size, bool is_purpose_est)
{
	struct intel_guc_state_capture *gc = guc->capture;
	struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
	int num_regs;

	if (!gc->reglists) {
		guc_warn(guc, "No capture reglist for this device\n");
		return -ENODEV;
	}

	if (cache->is_valid) {
		*size = cache->size;
		return cache->status;
	}

	if (!is_purpose_est && owner == GUC_CAPTURE_LIST_INDEX_PF &&
	    !guc_capture_get_one_list(gc->reglists, owner, type, classid)) {
		if (type == GUC_CAPTURE_LIST_TYPE_GLOBAL)
			guc_warn(guc, "Missing capture reglist: global!\n");
		else
			guc_warn(guc, "Missing capture reglist: %s(%u):%s(%u)!\n",
				 __stringify_type(type), type,
				 __stringify_engclass(classid), classid);
		return -ENODATA;
	}

	num_regs = guc_cap_list_num_regs(gc, owner, type, classid);
	/* intentional empty lists can exist depending on hw config */
	if (!num_regs)
		return -ENODATA;

	if (size)
		*size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) +
				   (num_regs * sizeof(struct guc_mmio_reg)));

	return 0;
}

int
intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
			      size_t *size)
{
	return guc_capture_getlistsize(guc, owner, type, classid, size, false);
}

static void guc_capture_create_prealloc_nodes(struct intel_guc *guc);

int
intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
			  void **outptr)
{
	struct intel_guc_state_capture *gc = guc->capture;
	struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
	struct guc_debug_capture_list *listnode;
	int ret, num_regs;
	u8 *caplist, *tmp;
	size_t size = 0;

	if (!gc->reglists)
		return -ENODEV;

	if (cache->is_valid) {
		*outptr = cache->ptr;
		return cache->status;
	}

	/*
	 * ADS population of input registers is a good
	 * time to pre-allocate cachelist output nodes
	 */
	guc_capture_create_prealloc_nodes(guc);

	ret = intel_guc_capture_getlistsize(guc, owner, type, classid, &size);
	if (ret) {
		cache->is_valid = true;
		cache->ptr = NULL;
		cache->size = 0;
		cache->status = ret;
		return ret;
	}

	caplist = kzalloc(size, GFP_KERNEL);
	if (!caplist) {
		guc_dbg(guc, "Failed to alloc cached register capture list");
		return -ENOMEM;
	}

	/* populate capture list header */
	tmp = caplist;
	num_regs = guc_cap_list_num_regs(guc->capture, owner, type, classid);
	listnode = (struct guc_debug_capture_list *)tmp;
	listnode->header.info = FIELD_PREP(GUC_CAPTURELISTHDR_NUMDESCR, (u32)num_regs);

	/* populate list of register descriptor */
	tmp += sizeof(struct guc_debug_capture_list);
	guc_capture_list_init(guc, owner, type, classid, (struct guc_mmio_reg *)tmp, num_regs);

	/* cache this list */
	cache->is_valid = true;
	cache->ptr = caplist;
	cache->size = size;
	cache->status = 0;

	*outptr = caplist;

	return 0;
}

int
intel_guc_capture_getnullheader(struct intel_guc *guc,
				void **outptr, size_t *size)
{
	struct intel_guc_state_capture *gc = guc->capture;
	int tmp = sizeof(u32) * 4;
	void *null_header;

	if (gc->ads_null_cache) {
		*outptr = gc->ads_null_cache;
		*size = tmp;
		return 0;
	}

	null_header = kzalloc(tmp, GFP_KERNEL);
	if (!null_header) {
		guc_dbg(guc, "Failed to alloc cached register capture null list");
		return -ENOMEM;
	}

	gc->ads_null_cache = null_header;
	*outptr = null_header;
	*size = tmp;

	return 0;
}

static int
guc_capture_output_min_size_est(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int worst_min_size = 0;
	size_t tmp = 0;

	if (!guc->capture)
		return -ENODEV;

	/*
	 * If every single engine-instance suffered a failure in quick succession but
	 * were all unrelated, then a burst of multiple error-capture events would dump
	 * registers for every one engine instance, one at a time. In this case, GuC
	 * would even dump the global-registers repeatedly.
	 *
	 * For each engine instance, there would be 1 x guc_state_capture_group_t output
	 * followed by 3 x guc_state_capture_t lists. The latter is how the register
	 * dumps are split across different register types (where the '3' are global vs class
	 * vs instance).
	 */
	for_each_engine(engine, gt, id) {
		worst_min_size += sizeof(struct guc_state_capture_group_header_t) +
				  (3 * sizeof(struct guc_state_capture_header_t));

		if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &tmp, true))
			worst_min_size += tmp;

		if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
					     engine->class, &tmp, true)) {
			worst_min_size += tmp;
		}
		if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
					     engine->class, &tmp, true)) {
			worst_min_size += tmp;
		}
	}

	return worst_min_size;
}

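/*
 * Back-of-envelope example (illustrative numbers, not a real platform):
 * with 8 engine instances and each of the global/class/instance lists
 * fitting in one PAGE_ALIGN'd page, the loop above estimates roughly
 *
 *	8 * (group-hdr + 3 * capture-hdr) + 8 * 3 * PAGE_SIZE  (~96K+)
 *
 * which check_guc_capture_size() below compares, scaled by the overbuffer
 * multiplier, against the capture section of the GuC log buffer.
 */
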
/*
 * Add on a 3x multiplier to allow for multiple back-to-back captures occurring
 * before the i915 can read the data out and process it
 */
#define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3

static void check_guc_capture_size(struct intel_guc *guc)
{
	int min_size = guc_capture_output_min_size_est(guc);
	int spare_size = min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER;
	u32 buffer_size = intel_guc_log_section_size_capture(&guc->log);

	/*
	 * NOTE: min_size is much smaller than the capture region allocation (DG2: <80K vs 1MB).
	 * Additionally, it's based on the space needed to fit all engines getting reset at once
	 * within the same G2H handler task slot, which is very unlikely. However, if GuC really
	 * does run out of space for whatever reason, we will see a separate warning message
	 * when processing the G2H event capture-notification, search for:
	 * INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE.
	 */
	if (min_size < 0)
		guc_warn(guc, "Failed to calculate error state capture buffer minimum size: %d!\n",
			 min_size);
	else if (min_size > buffer_size)
		guc_warn(guc, "Error state capture buffer may be too small: %d < %d\n",
			 buffer_size, min_size);
	else if (spare_size > buffer_size)
		guc_dbg(guc, "Error state capture buffer lacks spare size: %d < %d (min = %d)\n",
			buffer_size, spare_size, min_size);
}

/*
 * KMD Init time flows:
 * --------------------
 *     --> alloc A: GuC input capture regs lists (registered to GuC via ADS).
 *                  intel_guc_ads acquires the register lists by calling
 *                  intel_guc_capture_getlistsize and intel_guc_capture_getlist
 *                  'n' times, where n = 1 for global-reg-list +
 *                      num_engine_classes for class-reg-list +
 *                      num_engine_classes for instance-reg-list
 *                         (since all instances of the same engine-class type
 *                          have an identical engine-instance register-list).
 *                  ADS module also calls separately for PF vs VF.
 *
 *     --> alloc B: GuC output capture buf (registered via guc_init_params(log_param))
 *                  Size = #define CAPTURE_BUFFER_SIZE (warns if too small)
 *                  Note2: 'x 3' to hold multiple capture groups
 *
 * GUC Runtime notify capture:
 * --------------------------
 *     --> G2H STATE_CAPTURE_NOTIFICATION
 *                   L--> intel_guc_capture_process
 *                           L--> Loop through B (head..tail) and for each engine instance's
 *                                err-state-captured register-list we find, we alloc 'C':
 *      --> alloc C: A capture-output-node structure that includes misc capture info along
 *                   with 3 register list dumps (global, engine-class and engine-instance)
 *                   This node is created from a pre-allocated list of blank nodes in
 *                   guc->capture->cachelist and populated with the error-capture
 *                   data from GuC and then it's added into guc->capture->outlist linked
 *                   list. This list is used for matchup and printout by i915_gpu_coredump
 *                   and err_print_gt (when the user invokes the error capture sysfs).
 *
 * GUC --> notify context reset:
 * -----------------------------
 *     --> G2H CONTEXT RESET
 *                   L--> guc_handle_context_reset --> i915_capture_error_state
 *                          L--> i915_gpu_coredump(..IS_GUC_CAPTURE) --> gt_record_engines
 *                               --> capture_engine(..IS_GUC_CAPTURE)
 *                               L--> intel_guc_capture_get_matching_node is where
 *                                    we detach C from the internal linked list and add it
 *                                    into the intel_engine_coredump struct (if the context
 *                                    and engine of the event notification matches a node
 *                                    that was created and added to the link list above)
 *
 * User Sysfs / Debugfs
 * --------------------
 *      --> i915_gpu_coredump_copy_to_buffer->
 *                   L--> err_print_to_sgl --> err_print_gt
 *                        L--> error_print_guc_captures
 *                             L--> intel_guc_capture_print_node prints the
 *                                  register list values of the attached node
 *                                  on the error-engine-dump being reported.
 *                   L--> i915_reset_error_state ... -->__i915_gpu_coredump_free
 *                        L--> ... cleanup_gt -->
 *                             L--> intel_guc_capture_free_node returns the
 *                                  capture-output-node back to the internal
 *                                  cachelist for reuse.
 */

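/*
 * Illustrative sketch of the "alloc A" flow above (simplified; the real
 * caller lives in intel_guc_ads.c and also iterates owners and classes):
 *
 *	size_t size;
 *	void *list;
 *
 *	if (!intel_guc_capture_getlistsize(guc, owner, type, classid, &size) &&
 *	    !intel_guc_capture_getlist(guc, owner, type, classid, &list))
 *		// point the ADS capture-list entry for this slot at 'list'
 *	else
 *		// fall back to intel_guc_capture_getnullheader() for this slot
 */
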
static int guc_capture_buf_cnt(struct __guc_capture_bufstate *buf)
{
	if (buf->wr >= buf->rd)
		return (buf->wr - buf->rd);
	return (buf->size - buf->rd) + buf->wr;
}

static int guc_capture_buf_cnt_to_end(struct __guc_capture_bufstate *buf)
{
	if (buf->rd > buf->wr)
		return (buf->size - buf->rd);
	return (buf->wr - buf->rd);
}

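/*
 * Worked example (illustrative): size = 0x1000, rd = 0xFF0, wr = 0x10 means
 * the stream has wrapped; guc_capture_buf_cnt() reports
 * (0x1000 - 0xFF0) + 0x10 = 0x20 bytes in total, while
 * guc_capture_buf_cnt_to_end() reports only the 0x10 bytes that can be
 * consumed before the read pointer must wrap back to offset 0.
 */
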
/*
 * GuC's error-capture output is a ring buffer populated in a byte-stream fashion:
 *
 * The GuC Log buffer region for error-capture is managed like a ring buffer.
 * The GuC firmware dumps error capture logs into this ring in a byte-stream flow.
 * Additionally, as per the current and foreseeable future, all packed error-
 * capture output structures are dword aligned.
 *
 * That said, if the GuC firmware is in the midst of writing a structure that is larger
 * than one dword but the tail end of the err-capture buffer-region has less space left,
 * we would need to extract that structure one dword at a time, straddled across the end
 * onto the start of the ring.
 *
 * The function below, guc_capture_log_remove_dw, is a helper for that. All callers of
 * this function would typically do a straight-up memcpy from the ring contents and will
 * only call this helper if their structure-extraction is straddling across the end of
 * the ring. GuC firmware does not add any padding. The reason for the no-padding is to
 * ease scalability for future expansion of output data types without requiring a
 * redesign of the flow controls.
 */

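/*
 * For example (illustrative): extracting an 8-byte header with only 4 bytes
 * left before the end of the ring fails the straight memcpy path, so the
 * caller issues two guc_capture_log_remove_dw() calls: the first consumes
 * the final dword before the end, the second wraps buf->rd back to 0 and
 * reads the remaining dword from the start of the ring.
 */
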
static int
guc_capture_log_remove_dw(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
			  u32 *dw)
{
	int tries = 2;
	int avail = 0;
	u32 *src_data;

	if (!guc_capture_buf_cnt(buf))
		return 0;

	while (tries--) {
		avail = guc_capture_buf_cnt_to_end(buf);
		if (avail >= sizeof(u32)) {
			src_data = (u32 *)(buf->data + buf->rd);
			*dw = *src_data;
			buf->rd += 4;
			return 4;
		}
		if (avail)
			guc_dbg(guc, "Register capture log not dword aligned, skipping.\n");
		buf->rd = 0;
	}

	return 0;
}

static bool
guc_capture_data_extracted(struct __guc_capture_bufstate *b,
			   int size, void *dest)
{
	if (guc_capture_buf_cnt_to_end(b) >= size) {
		memcpy(dest, (b->data + b->rd), size);
		b->rd += size;
		return true;
	}
	return false;
}

static int
guc_capture_log_get_group_hdr(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
			      struct guc_state_capture_group_header_t *ghdr)
{
	int read = 0;
	int fullsize = sizeof(struct guc_state_capture_group_header_t);

	if (fullsize > guc_capture_buf_cnt(buf))
		return -1;

	if (guc_capture_data_extracted(buf, fullsize, (void *)ghdr))
		return 0;

	read += guc_capture_log_remove_dw(guc, buf, &ghdr->owner);
	read += guc_capture_log_remove_dw(guc, buf, &ghdr->info);
	if (read != fullsize)
		return -1;

	return 0;
}

static int
guc_capture_log_get_data_hdr(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
			     struct guc_state_capture_header_t *hdr)
{
	int read = 0;
	int fullsize = sizeof(struct guc_state_capture_header_t);

	if (fullsize > guc_capture_buf_cnt(buf))
		return -1;

	if (guc_capture_data_extracted(buf, fullsize, (void *)hdr))
		return 0;

	read += guc_capture_log_remove_dw(guc, buf, &hdr->owner);
	read += guc_capture_log_remove_dw(guc, buf, &hdr->info);
	read += guc_capture_log_remove_dw(guc, buf, &hdr->lrca);
	read += guc_capture_log_remove_dw(guc, buf, &hdr->guc_id);
	read += guc_capture_log_remove_dw(guc, buf, &hdr->num_mmios);
	if (read != fullsize)
		return -1;

	return 0;
}

static int
guc_capture_log_get_register(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
			     struct guc_mmio_reg *reg)
{
	int read = 0;
	int fullsize = sizeof(struct guc_mmio_reg);

	if (fullsize > guc_capture_buf_cnt(buf))
		return -1;

	if (guc_capture_data_extracted(buf, fullsize, (void *)reg))
		return 0;

	read += guc_capture_log_remove_dw(guc, buf, &reg->offset);
	read += guc_capture_log_remove_dw(guc, buf, &reg->value);
	read += guc_capture_log_remove_dw(guc, buf, &reg->flags);
	read += guc_capture_log_remove_dw(guc, buf, &reg->mask);
	if (read != fullsize)
		return -1;

	return 0;
}

static void
guc_capture_delete_one_node(struct intel_guc *guc, struct __guc_capture_parsed_output *node)
{
	int i;

	for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i)
		kfree(node->reginfo[i].regs);
	list_del(&node->link);
	kfree(node);
}

static void
guc_capture_delete_prealloc_nodes(struct intel_guc *guc)
{
	struct __guc_capture_parsed_output *n, *ntmp;

	/*
	 * NOTE: At the end of driver operation, we must assume that we
	 * have prealloc nodes in both the cachelist as well as outlist
	 * if unclaimed error capture events occurred prior to shutdown.
	 */
	list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link)
		guc_capture_delete_one_node(guc, n);

	list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link)
		guc_capture_delete_one_node(guc, n);
}

static void
guc_capture_add_node_to_list(struct __guc_capture_parsed_output *node,
			     struct list_head *list)
{
	list_add_tail(&node->link, list);
}

static void
guc_capture_add_node_to_outlist(struct intel_guc_state_capture *gc,
				struct __guc_capture_parsed_output *node)
{
	guc_capture_add_node_to_list(node, &gc->outlist);
}

static void
guc_capture_add_node_to_cachelist(struct intel_guc_state_capture *gc,
				  struct __guc_capture_parsed_output *node)
{
	guc_capture_add_node_to_list(node, &gc->cachelist);
}

static void
guc_capture_init_node(struct intel_guc *guc, struct __guc_capture_parsed_output *node)
{
	struct guc_mmio_reg *tmp[GUC_CAPTURE_LIST_TYPE_MAX];
	int i;

	for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
		tmp[i] = node->reginfo[i].regs;
		memset(tmp[i], 0, sizeof(struct guc_mmio_reg) *
		       guc->capture->max_mmio_per_node);
	}
	memset(node, 0, sizeof(*node));
	for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i)
		node->reginfo[i].regs = tmp[i];

	INIT_LIST_HEAD(&node->link);
}

static struct __guc_capture_parsed_output *
guc_capture_get_prealloc_node(struct intel_guc *guc)
{
	struct __guc_capture_parsed_output *found = NULL;

	if (!list_empty(&guc->capture->cachelist)) {
		struct __guc_capture_parsed_output *n, *ntmp;

		/* get first avail node from the cache list */
		list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link) {
			found = n;
			break;
		}
	} else {
		struct __guc_capture_parsed_output *n, *ntmp;

		/* traverse down and steal back the oldest node already allocated */
		list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
			found = n;
		}
	}
	if (found) {
		list_del(&found->link);
		guc_capture_init_node(guc, found);
	}

	return found;
}

static struct __guc_capture_parsed_output *
guc_capture_alloc_one_node(struct intel_guc *guc)
{
	struct __guc_capture_parsed_output *new;
	int i;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
		new->reginfo[i].regs = kcalloc(guc->capture->max_mmio_per_node,
					       sizeof(struct guc_mmio_reg), GFP_KERNEL);
		if (!new->reginfo[i].regs) {
			while (i)
				kfree(new->reginfo[--i].regs);
			kfree(new);
			return NULL;
		}
	}
	guc_capture_init_node(guc, new);

	return new;
}

static struct __guc_capture_parsed_output *
guc_capture_clone_node(struct intel_guc *guc, struct __guc_capture_parsed_output *original,
		       u32 keep_reglist_mask)
{
	struct __guc_capture_parsed_output *new;
	int i;

	new = guc_capture_get_prealloc_node(guc);
	if (!new)
		return NULL;
	if (!original)
		return new;

	new->is_partial = original->is_partial;

	/* copy reg-lists that we want to clone */
	for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
		if (keep_reglist_mask & BIT(i)) {
			GEM_BUG_ON(original->reginfo[i].num_regs >
				   guc->capture->max_mmio_per_node);

			memcpy(new->reginfo[i].regs, original->reginfo[i].regs,
			       original->reginfo[i].num_regs * sizeof(struct guc_mmio_reg));

			new->reginfo[i].num_regs = original->reginfo[i].num_regs;
			new->reginfo[i].vfid = original->reginfo[i].vfid;

			if (i == GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS) {
				new->eng_class = original->eng_class;
			} else if (i == GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE) {
				new->eng_inst = original->eng_inst;
				new->guc_id = original->guc_id;
				new->lrca = original->lrca;
			}
		}
	}

	return new;
}

static void
__guc_capture_create_prealloc_nodes(struct intel_guc *guc)
{
	struct __guc_capture_parsed_output *node = NULL;
	int i;

	for (i = 0; i < PREALLOC_NODES_MAX_COUNT; ++i) {
		node = guc_capture_alloc_one_node(guc);
		if (!node) {
			guc_warn(guc, "Register capture pre-alloc-cache failure\n");
			/* don't free the priors, use what we got and cleanup at shutdown */
			return;
		}
		guc_capture_add_node_to_cachelist(guc->capture, node);
	}
}

static int
guc_get_max_reglist_count(struct intel_guc *guc)
{
	int i, j, k, tmp, maxregcount = 0;

	for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) {
		for (j = 0; j < GUC_CAPTURE_LIST_TYPE_MAX; ++j) {
			for (k = 0; k < GUC_MAX_ENGINE_CLASSES; ++k) {
				if (j == GUC_CAPTURE_LIST_TYPE_GLOBAL && k > 0)
					continue;

				tmp = guc_cap_list_num_regs(guc->capture, i, j, k);
				if (tmp > maxregcount)
					maxregcount = tmp;
			}
		}
	}
	if (!maxregcount)
		maxregcount = PREALLOC_NODES_DEFAULT_NUMREGS;

	return maxregcount;
}

static void
guc_capture_create_prealloc_nodes(struct intel_guc *guc)
{
	/* skip if we've already done the pre-alloc */
	if (guc->capture->max_mmio_per_node)
		return;

	guc->capture->max_mmio_per_node = guc_get_max_reglist_count(guc);
	__guc_capture_create_prealloc_nodes(guc);
}

static int
guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstate *buf)
{
	struct guc_state_capture_group_header_t ghdr = {};
	struct guc_state_capture_header_t hdr = {};
	struct __guc_capture_parsed_output *node = NULL;
	struct guc_mmio_reg *regs = NULL;
	int i, numlists, numregs, ret = 0;
	enum guc_capture_type datatype;
	struct guc_mmio_reg tmp;
	bool is_partial = false;

	i = guc_capture_buf_cnt(buf);
	if (!i)
		return -ENODATA;
	if (i % sizeof(u32)) {
		guc_warn(guc, "Got mis-aligned register capture entries\n");
		ret = -EIO;
		goto bailout;
	}

	/* first get the capture group header */
	if (guc_capture_log_get_group_hdr(guc, buf, &ghdr)) {
		ret = -EIO;
		goto bailout;
	}
	/*
	 * We would typically expect a layout as below, where the number of
	 * captures is at least 3 (global + engine-class + engine-instance)
	 * and grows beyond that when multiple dependent engine instances are
	 * reset together.
	 * ____________________________________________
	 * | Capture Group                            |
	 * | ________________________________________ |
	 * | | Capture Group Header:                | |
	 * | |  - num_captures = 5                  | |
	 * | |______________________________________| |
	 * | ________________________________________ |
	 * | | Capture1:                            | |
	 * | |  Hdr: GLOBAL, numregs=a              | |
	 * | | ____________________________________ | |
	 * | | | Reglist                          | | |
	 * | | | - reg1, reg2, ... rega           | | |
	 * | | |__________________________________| | |
	 * | |______________________________________| |
	 * | ________________________________________ |
	 * | | Capture2:                            | |
	 * | |  Hdr: CLASS=RENDER/COMPUTE, numregs=b| |
	 * | | ____________________________________ | |
	 * | | | Reglist                          | | |
	 * | | | - reg1, reg2, ... regb           | | |
	 * | | |__________________________________| | |
	 * | |______________________________________| |
	 * | ________________________________________ |
	 * | | Capture3:                            | |
	 * | |  Hdr: INSTANCE=RCS, numregs=c        | |
	 * | | ____________________________________ | |
	 * | | | Reglist                          | | |
	 * | | | - reg1, reg2, ... regc           | | |
	 * | | |__________________________________| | |
	 * | |______________________________________| |
	 * | ________________________________________ |
	 * | | Capture4:                            | |
	 * | |  Hdr: CLASS=RENDER/COMPUTE, numregs=d| |
	 * | | ____________________________________ | |
	 * | | | Reglist                          | | |
	 * | | | - reg1, reg2, ... regd           | | |
	 * | | |__________________________________| | |
	 * | |______________________________________| |
	 * | ________________________________________ |
	 * | | Capture5:                            | |
	 * | |  Hdr: INSTANCE=CCS0, numregs=e       | |
	 * | | ____________________________________ | |
	 * | | | Reglist                          | | |
	 * | | | - reg1, reg2, ... rege           | | |
	 * | | |__________________________________| | |
	 * | |______________________________________| |
	 * |__________________________________________|
	 */
	is_partial = FIELD_GET(CAP_GRP_HDR_CAPTURE_TYPE, ghdr.info);
	numlists = FIELD_GET(CAP_GRP_HDR_NUM_CAPTURES, ghdr.info);

	while (numlists--) {
		if (guc_capture_log_get_data_hdr(guc, buf, &hdr)) {
			ret = -EIO;
			break;
		}

		datatype = FIELD_GET(CAP_HDR_CAPTURE_TYPE, hdr.info);
		if (datatype > GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE) {
			/* unknown capture type - skip over to next capture set */
			numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios);
			while (numregs--) {
				if (guc_capture_log_get_register(guc, buf, &tmp)) {
					ret = -EIO;
					break;
				}
			}
			continue;
		} else if (node) {
			/*
			 * Based on the current capture type and what we have so far,
			 * decide if we should add the current node into the internal
			 * linked list for match-up when i915_gpu_coredump calls later
			 * (and alloc a blank node for the next set of reglists)
			 * or continue with the same node or clone the current node
			 * but only retain the global or class registers (such as the
			 * case of dependent engine resets).
			 */
			if (datatype == GUC_CAPTURE_LIST_TYPE_GLOBAL) {
				guc_capture_add_node_to_outlist(guc->capture, node);
				node = NULL;
			} else if (datatype == GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS &&
				   node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS].num_regs) {
				/* Add to list, clone node and duplicate global list */
				guc_capture_add_node_to_outlist(guc->capture, node);
				node = guc_capture_clone_node(guc, node,
							      GCAP_PARSED_REGLIST_INDEX_GLOBAL);
			} else if (datatype == GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE &&
				   node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE].num_regs) {
				/* Add to list, clone node and duplicate global + class lists */
				guc_capture_add_node_to_outlist(guc->capture, node);
				node = guc_capture_clone_node(guc, node,
							      (GCAP_PARSED_REGLIST_INDEX_GLOBAL |
							      GCAP_PARSED_REGLIST_INDEX_ENGCLASS));
			}
		}

		if (!node) {
			node = guc_capture_get_prealloc_node(guc);
			if (!node) {
				ret = -ENOMEM;
				break;
			}
			if (datatype != GUC_CAPTURE_LIST_TYPE_GLOBAL)
				guc_dbg(guc, "Register capture missing global dump: %08x!\n",
					datatype);
		}
		node->is_partial = is_partial;
		node->reginfo[datatype].vfid = FIELD_GET(CAP_HDR_CAPTURE_VFID, hdr.owner);
		switch (datatype) {
		case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
			node->eng_class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info);
			node->eng_inst = FIELD_GET(CAP_HDR_ENGINE_INSTANCE, hdr.info);
			node->lrca = hdr.lrca;
			node->guc_id = hdr.guc_id;
			break;
		case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
			node->eng_class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info);
			break;
		default:
			break;
		}

		numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios);
		if (numregs > guc->capture->max_mmio_per_node) {
			guc_dbg(guc, "Register capture list extraction clipped by prealloc!\n");
			numregs = guc->capture->max_mmio_per_node;
		}
		node->reginfo[datatype].num_regs = numregs;
		regs = node->reginfo[datatype].regs;
		i = 0;
		while (numregs--) {
			if (guc_capture_log_get_register(guc, buf, &regs[i++])) {
				ret = -EIO;
				break;
			}
		}
	}

bailout:
	if (node) {
		/* If we have data, add to linked list for match-up when i915_gpu_coredump calls */
		for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
			if (node->reginfo[i].regs) {
				guc_capture_add_node_to_outlist(guc->capture, node);
				node = NULL;
				break;
			}
		}
		if (node) /* else return it back to cache list */
			guc_capture_add_node_to_cachelist(guc->capture, node);
	}
	return ret;
}

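/*
 * Example of the clone behaviour above (hypothetical sequence): a group of
 * GLOBAL, CLASS(RCS), INSTANCE(RCS), CLASS(CCS), INSTANCE(CCS0) lists from
 * a dependent engine reset yields two outlist nodes: the RCS node and a
 * CCS0 node cloned from it that re-uses the shared global register dump.
 */
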
static int __guc_capture_flushlog_complete(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE,
		GUC_CAPTURE_LOG_BUFFER
	};

	return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0);
}

static void __guc_capture_process_output(struct intel_guc *guc)
{
	unsigned int buffer_size, read_offset, write_offset, full_count;
	struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
	struct guc_log_buffer_state log_buf_state_local;
	struct guc_log_buffer_state *log_buf_state;
	struct __guc_capture_bufstate buf;
	void *src_data = NULL;
	bool new_overflow;
	int ret;

	log_buf_state = guc->log.buf_addr +
			(sizeof(struct guc_log_buffer_state) * GUC_CAPTURE_LOG_BUFFER);
	src_data = guc->log.buf_addr +
		   intel_guc_get_log_buffer_offset(&guc->log, GUC_CAPTURE_LOG_BUFFER);

	/*
	 * Make a copy of the state structure, inside GuC log buffer
	 * (which is uncached mapped), on the stack to avoid reading
	 * from it multiple times.
	 */
	memcpy(&log_buf_state_local, log_buf_state, sizeof(struct guc_log_buffer_state));
	buffer_size = intel_guc_get_log_buffer_size(&guc->log, GUC_CAPTURE_LOG_BUFFER);
	read_offset = log_buf_state_local.read_ptr;
	write_offset = log_buf_state_local.sampled_write_ptr;
	full_count = log_buf_state_local.buffer_full_cnt;

	/* Bookkeeping stuff */
	guc->log.stats[GUC_CAPTURE_LOG_BUFFER].flush += log_buf_state_local.flush_to_file;
	new_overflow = intel_guc_check_log_buf_overflow(&guc->log, GUC_CAPTURE_LOG_BUFFER,
							full_count);

	/* Now copy the actual logs. */
	if (unlikely(new_overflow)) {
		/* copy the whole buffer in case of overflow */
		read_offset = 0;
		write_offset = buffer_size;
	} else if (unlikely((read_offset > buffer_size) ||
			    (write_offset > buffer_size))) {
		guc_err(guc, "Register capture buffer in invalid state: read = 0x%X, size = 0x%X!\n",
			read_offset, buffer_size);
		/* copy whole buffer as offsets are unreliable */
		read_offset = 0;
		write_offset = buffer_size;
	}

	buf.size = buffer_size;
	buf.rd = read_offset;
	buf.wr = write_offset;
	buf.data = src_data;

	if (!uc->reset_in_progress) {
		do {
			ret = guc_capture_extract_reglists(guc, &buf);
		} while (ret >= 0);
	}

	/* Update the state of log buffer err-cap state */
	log_buf_state->read_ptr = write_offset;
	log_buf_state->flush_to_file = 0;
	__guc_capture_flushlog_complete(guc);
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

static const char *
guc_capture_reg_to_str(const struct intel_guc *guc, u32 owner, u32 type,
		       u32 class, u32 id, u32 offset, u32 *is_ext)
{
	const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
	struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists;
	const struct __guc_mmio_reg_descr_group *match;
	struct __guc_mmio_reg_descr_group *matchext;
	int j;

	*is_ext = 0;
	if (!reglists)
		return NULL;

	match = guc_capture_get_one_list(reglists, owner, type, id);
	if (!match)
		return NULL;

	for (j = 0; j < match->num_regs; ++j) {
		if (offset == match->list[j].reg.reg)
			return match->list[j].regname;
	}
	if (extlists) {
		matchext = guc_capture_get_one_ext_list(extlists, owner, type, id);
		if (!matchext)
			return NULL;
		for (j = 0; j < matchext->num_regs; ++j) {
			if (offset == matchext->extlist[j].reg.reg) {
				*is_ext = 1;
				return matchext->extlist[j].regname;
			}
		}
	}

	return NULL;
}

#define GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng) \
	do { \
		i915_error_printf(ebuf, "    i915-Eng-Name: %s command stream\n", \
				  (eng)->name); \
		i915_error_printf(ebuf, "    i915-Eng-Inst-Class: 0x%02x\n", (eng)->class); \
		i915_error_printf(ebuf, "    i915-Eng-Inst-Id: 0x%02x\n", (eng)->instance); \
		i915_error_printf(ebuf, "    i915-Eng-LogicalMask: 0x%08x\n", \
				  (eng)->logical_mask); \
	} while (0)

#define GCAP_PRINT_GUC_INST_INFO(ebuf, node) \
	do { \
		i915_error_printf(ebuf, "    GuC-Engine-Inst-Id: 0x%08x\n", \
				  (node)->eng_inst); \
		i915_error_printf(ebuf, "    GuC-Context-Id: 0x%08x\n", (node)->guc_id); \
		i915_error_printf(ebuf, "    LRCA: 0x%08x\n", (node)->lrca); \
	} while (0)

int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
					const struct intel_engine_coredump *ee)
{
	const char *grptype[GUC_STATE_CAPTURE_GROUP_TYPE_MAX] = {
		"full-capture",
		"partial-capture"
	};
	const char *datatype[GUC_CAPTURE_LIST_TYPE_MAX] = {
		"Global",
		"Engine-Class",
		"Engine-Instance"
	};
	struct intel_guc_state_capture *cap;
	struct __guc_capture_parsed_output *node;
	struct intel_engine_cs *eng;
	struct guc_mmio_reg *regs;
	struct intel_guc *guc;
	const char *str;
	int numregs, i, j;
	u32 is_ext;

	if (!ebuf || !ee)
		return -EINVAL;
	cap = ee->guc_capture;
	if (!cap || !ee->engine)
		return -ENODEV;

	guc = &ee->engine->gt->uc.guc;

	i915_error_printf(ebuf, "global --- GuC Error Capture on %s command stream:\n",
			  ee->engine->name);

	node = ee->guc_capture_node;
	if (!node) {
		i915_error_printf(ebuf, "  No matching ee-node\n");
		return 0;
	}

	i915_error_printf(ebuf, "Coverage:  %s\n", grptype[node->is_partial]);

	for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
		i915_error_printf(ebuf, "  RegListType: %s\n",
				  datatype[i % GUC_CAPTURE_LIST_TYPE_MAX]);
		i915_error_printf(ebuf, "    Owner-Id: %d\n", node->reginfo[i].vfid);

		switch (i) {
		case GUC_CAPTURE_LIST_TYPE_GLOBAL:
		default:
			break;
		case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
			i915_error_printf(ebuf, "    GuC-Eng-Class: %d\n", node->eng_class);
			i915_error_printf(ebuf, "    i915-Eng-Class: %d\n",
					  guc_class_to_engine_class(node->eng_class));
			break;
		case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
			eng = intel_guc_lookup_engine(guc, node->eng_class, node->eng_inst);
			if (eng)
				GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng);
			else
				i915_error_printf(ebuf, "    i915-Eng-Lookup Fail!\n");
			GCAP_PRINT_GUC_INST_INFO(ebuf, node);
			break;
		}

		numregs = node->reginfo[i].num_regs;
		i915_error_printf(ebuf, "    NumRegs: %d\n", numregs);
		j = 0;
		while (numregs--) {
			regs = node->reginfo[i].regs;
			str = guc_capture_reg_to_str(guc, GUC_CAPTURE_LIST_INDEX_PF, i,
						     node->eng_class, 0, regs[j].offset, &is_ext);
			if (!str)
				i915_error_printf(ebuf, "      REG-0x%08x", regs[j].offset);
			else
				i915_error_printf(ebuf, "      %s", str);
			if (is_ext)
				i915_error_printf(ebuf, "[%ld][%ld]",
						  FIELD_GET(GUC_REGSET_STEERING_GROUP, regs[j].flags),
						  FIELD_GET(GUC_REGSET_STEERING_INSTANCE, regs[j].flags));
			i915_error_printf(ebuf, ":  0x%08x\n", regs[j].value);
			j++;
		}
	}
	return 0;
}

#endif //CONFIG_DRM_I915_CAPTURE_ERROR

static void guc_capture_find_ecode(struct intel_engine_coredump *ee)
{
	struct gcap_reg_list_info *reginfo;
	struct guc_mmio_reg *regs;
	i915_reg_t reg_ipehr = RING_IPEHR(0);
	i915_reg_t reg_instdone = RING_INSTDONE(0);
	int i;

	if (!ee->guc_capture_node)
		return;

	reginfo = ee->guc_capture_node->reginfo + GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE;
	regs = reginfo->regs;
	for (i = 0; i < reginfo->num_regs; i++) {
		if (regs[i].offset == reg_ipehr.reg)
			ee->ipehr = regs[i].value;
		else if (regs[i].offset == reg_instdone.reg)
			ee->instdone.instdone = regs[i].value;
	}
}

void intel_guc_capture_free_node(struct intel_engine_coredump *ee)
{
	if (!ee || !ee->guc_capture_node)
		return;

	guc_capture_add_node_to_cachelist(ee->guc_capture, ee->guc_capture_node);
	ee->guc_capture = NULL;
	ee->guc_capture_node = NULL;
}

bool intel_guc_capture_is_matching_engine(struct intel_gt *gt,
					  struct intel_context *ce,
					  struct intel_engine_cs *engine)
{
	struct __guc_capture_parsed_output *n;
	struct intel_guc *guc;

	if (!gt || !ce || !engine)
		return false;

	guc = &gt->uc.guc;
	if (!guc->capture)
		return false;

	/*
	 * Look for a matching GuC reported error capture node from
	 * the internal output link-list based on lrca, guc-id and engine
	 * identification.
	 */
	list_for_each_entry(n, &guc->capture->outlist, link) {
		if (n->eng_inst == GUC_ID_TO_ENGINE_INSTANCE(engine->guc_id) &&
		    n->eng_class == GUC_ID_TO_ENGINE_CLASS(engine->guc_id) &&
		    n->guc_id == ce->guc_id.id &&
		    (n->lrca & CTX_GTT_ADDRESS_MASK) == (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK))
			return true;
	}

	return false;
}

void intel_guc_capture_get_matching_node(struct intel_gt *gt,
					 struct intel_engine_coredump *ee,
					 struct intel_context *ce)
{
	struct __guc_capture_parsed_output *n, *ntmp;
	struct intel_guc *guc;

	if (!gt || !ee || !ce)
		return;

	guc = &gt->uc.guc;
	if (!guc->capture)
		return;

	GEM_BUG_ON(ee->guc_capture_node);

	/*
	 * Look for a matching GuC reported error capture node from
	 * the internal output link-list based on lrca, guc-id and engine
	 * identification.
	 */
	list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
		if (n->eng_inst == GUC_ID_TO_ENGINE_INSTANCE(ee->engine->guc_id) &&
		    n->eng_class == GUC_ID_TO_ENGINE_CLASS(ee->engine->guc_id) &&
		    n->guc_id == ce->guc_id.id &&
		    (n->lrca & CTX_GTT_ADDRESS_MASK) == (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) {
			list_del(&n->link);
			ee->guc_capture_node = n;
			ee->guc_capture = guc->capture;
			guc_capture_find_ecode(ee);
			return;
		}
	}

	guc_warn(guc, "No register capture node found for 0x%04X / 0x%08X\n",
		 ce->guc_id.id, ce->lrc.lrca);
}

void intel_guc_capture_process(struct intel_guc *guc)
{
	if (guc->capture)
		__guc_capture_process_output(guc);
}

static void
guc_capture_free_ads_cache(struct intel_guc_state_capture *gc)
{
	int i, j, k;
	struct __guc_capture_ads_cache *cache;

	for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) {
		for (j = 0; j < GUC_CAPTURE_LIST_TYPE_MAX; ++j) {
			for (k = 0; k < GUC_MAX_ENGINE_CLASSES; ++k) {
				cache = &gc->ads_cache[i][j][k];
				if (cache->is_valid)
					kfree(cache->ptr);
			}
		}
	}
	kfree(gc->ads_null_cache);
}

void intel_guc_capture_destroy(struct intel_guc *guc)
{
	if (!guc->capture)
		return;

	guc_capture_free_ads_cache(guc->capture);

	guc_capture_delete_prealloc_nodes(guc);

	guc_capture_free_extlists(guc->capture->extlists);
	kfree(guc->capture->extlists);

	kfree(guc->capture);
	guc->capture = NULL;
}

int intel_guc_capture_init(struct intel_guc *guc)
{
	guc->capture = kzalloc(sizeof(*guc->capture), GFP_KERNEL);
	if (!guc->capture)
		return -ENOMEM;

	guc->capture->reglists = guc_capture_get_device_reglist(guc);

	INIT_LIST_HEAD(&guc->capture->outlist);
	INIT_LIST_HEAD(&guc->capture->cachelist);

	check_guc_capture_size(guc);

	return 0;
}