// SPDX-License-Identifier: MIT
/*
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};
static int request_add_sync(struct i915_request *rq, int err)
	if (i915_request_wait(rq, 0, HZ / 5) < 0)

static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
	if (spin && !igt_wait_for_spinner(spin, rq))

reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, gt, "GT_REF", "global");
	gt_init_workarounds(gt, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, gt, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, gt, "REF", engine->name);
		engine_init_workarounds(engine, wal);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,

reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
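
/*
 * read_nonprivs() - capture the RING_FORCE_TO_NONPRIV slots.
 *
 * From the calls visible here: allocate a page-sized internal object,
 * fill it with a 0xc5 poison pattern, pin it into the GGTT, then emit
 * one SRM (MI_STORE_REGISTER_MEM) per RING_FORCE_TO_NONPRIV slot so the
 * hardware dumps each slot's current contents into the buffer.
 */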
static struct drm_i915_gem_object *
read_nonprivs(struct intel_context *ce)
	struct intel_engine_cs *engine = ce->engine;
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map_unlocked(result, I915_MAP_WB);
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);

	rq = intel_context_create_request(ce);

	err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (GRAPHICS_VER(engine->i915) >= 8)

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
	intel_ring_advance(rq, cs);

	i915_request_add(rq);

	i915_request_add(rq);
	i915_gem_object_put(result);
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);

print_results(const struct intel_engine_cs *engine, const u32 *results)
	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
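
/*
 * check_whitelist() - compare the captured RING_NONPRIV slots against the
 * engine's whitelist. Each slot is expected to hold the offset of the
 * corresponding whitelisted register, or RING_NOPID for unused slots (see
 * get_whitelist_reg() above). The readback is guarded by
 * intel_wedge_on_timeout() so a stuck GPU cannot hang the selftest.
 */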
static int check_whitelist(struct intel_context *ce)
	struct intel_engine_cs *engine = ce->engine;
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;

	results = read_nonprivs(ce);
		return PTR_ERR(results);

	i915_gem_object_lock(results, NULL);
	intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	if (intel_gt_is_wedged(engine->gt))

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
		err = PTR_ERR(vaddr);

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

	i915_gem_object_unpin_map(results);

	i915_gem_object_unlock(results);
	i915_gem_object_put(results);
static int do_device_reset(struct intel_engine_cs *engine)
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");

static int do_engine_reset(struct intel_engine_cs *engine)
	return intel_engine_reset(engine, "live_workarounds");

static int do_guc_reset(struct intel_engine_cs *engine)
	/* Currently a no-op as the reset is handled by GuC */

switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin,
			  struct i915_request **rq)
	struct intel_context *ce;

	ce = intel_context_create(engine);

	*rq = igt_spinner_create_request(spin, ce, MI_NOOP);
	intel_context_put(ce);

	err = request_add_spin(*rq, spin);
		igt_spinner_end(spin);
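
/*
 * check_whitelist_across_reset() - verify the whitelist before the reset,
 * keep the engine busy with a spinner running in a scratch context,
 * perform the requested reset via the reset() callback (engine, GuC-based
 * or full GPU, as selected by live_reset_whitelist()), and then confirm
 * that RING_NONPRIV survived both in the original context and in a
 * freshly created one.
 */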
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
	struct intel_context *ce, *tmp;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ce = intel_context_create(engine);

	err = igt_spinner_init(&spin, engine->gt);

	err = check_whitelist(ce);
		pr_err("Invalid whitelist *before* %s reset!\n", name);

	err = switch_to_scratch_context(engine, &spin, &rq);

	/* Ensure the spinner hasn't aborted */
	if (i915_request_completed(rq)) {
		pr_err("%s spinner failed to start\n", name);

	with_intel_runtime_pm(engine->uncore->rpm, wakeref)

	/* Ensure the reset happens and kills the engine */
		err = intel_selftest_wait_for_rq(rq);

	igt_spinner_end(&spin);

		pr_err("%s reset failed\n", name);

	err = check_whitelist(ce);
		pr_err("Whitelist not preserved in context across %s reset!\n",

	tmp = intel_context_create(engine);

	intel_context_put(ce);

	err = check_whitelist(ce);
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",

	igt_spinner_fini(&spin);

	intel_context_put(ce);
static struct i915_vma *create_batch(struct i915_address_space *vm)
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);

	i915_gem_object_put(obj);
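
/*
 * reg_write() - predict the value of a whitelisted register after an
 * unprivileged write. 'rsvd' is the writable-bit mask detected by
 * check_dirty_whitelist(); rsvd == 0x0000ffff denotes a masked register,
 * where the upper 16 bits of the written value act as per-bit write
 * enables for the lower 16 bits (so e.g. writing 0x00010001 sets bit 0).
 */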
static u32 reg_write(u32 old, u32 new, u32 rsvd)
	if (rsvd == 0x0000ffff) {
		old |= new & (new >> 16);

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;

	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	    RING_FORCE_TO_NONPRIV_ACCESS_WR)

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)

static bool timestamp(const struct intel_engine_cs *engine, u32 reg)
	reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;

static bool ro_register(u32 reg)
	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	    RING_FORCE_TO_NONPRIV_ACCESS_RD)

static int whitelist_writable_count(struct intel_engine_cs *engine)
	int count = engine->whitelist.count;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
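
/*
 * check_dirty_whitelist() - for every whitelisted register (skipping
 * write-only registers and timestamps), build an unprivileged batch that
 * hammers the register with the values[] patterns and their complements
 * via MI_LOAD_REGISTER_IMM, storing a readback after each write into the
 * scratch buffer with SRM. The final all-ones write is used to detect
 * which bits are actually writable (rsvd), and every readback is then
 * checked against the value predicted by reg_write().
 */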
static int check_dirty_whitelist(struct intel_context *ce)
	const u32 values[] = {
	struct intel_engine_cs *engine = ce->engine;
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v, sz;

	sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
	scratch = __vm_create_scratch_for_read_pinned(ce->vm, sz);
		return PTR_ERR(scratch);

	batch = create_batch(ce->vm);
		err = PTR_ERR(batch);

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		struct i915_gem_ww_ctx ww;
		u64 addr = i915_vma_offset(scratch);
		struct i915_request *rq;

		if (wo_register(engine, reg))

		if (timestamp(engine, reg))
			continue; /* timestamps are expected to autoincrement */

		ro_reg = ro_register(reg);

		i915_gem_ww_ctx_init(&ww, false);
		err = i915_gem_object_lock(scratch->obj, &ww);
			err = i915_gem_object_lock(batch->obj, &ww);
			err = intel_context_pin_ww(ce, &ww);

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_unmap_batch;

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (GRAPHICS_VER(engine->i915) >= 8)

		pr_debug("%s: Writing garbage to %x\n",

		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		for (v = 0; v < ARRAY_SIZE(values); v++) {
			*cs++ = MI_LOAD_REGISTER_IMM(1);

			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			*cs++ = MI_LOAD_REGISTER_IMM(1);

			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);

		rq = i915_request_create(ce);
			goto out_unmap_scratch;

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);

		err = i915_vma_move_to_active(batch, rq, 0);

		err = i915_vma_move_to_active(scratch, rq,

		err = engine->emit_bb_start(rq,
					    i915_vma_offset(batch), PAGE_SIZE,

		err = request_add_sync(rq, err);
			pr_err("%s: Futzing %x timed out; cancelling test\n",
			intel_gt_set_wedged(engine->gt);
			goto out_unmap_scratch;

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);

		/* detect write masking */
		rsvd = results[ARRAY_SIZE(values)];
			pr_err("%s: Unable to write to whitelisted register %x\n",
			goto out_unmap_scratch;

		for (v = 0; v < ARRAY_SIZE(values); v++) {
				expect = reg_write(expect, values[v], rsvd);

			if (results[idx] != expect)
		for (v = 0; v < ARRAY_SIZE(values); v++) {
				expect = reg_write(expect, ~values[v], rsvd);

			if (results[idx] != expect)

			pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
			       engine->name, err, reg);

				pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
					engine->name, reg, results[0]);
				pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
					engine->name, reg, results[0], rsvd);

			for (v = 0; v < ARRAY_SIZE(values); v++) {
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
			for (v = 0; v < ARRAY_SIZE(values); v++) {
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);

		i915_gem_object_unpin_map(scratch->obj);

		i915_gem_object_unpin_map(batch->obj);

		intel_context_unpin(ce);

		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
		i915_gem_ww_ctx_fini(&ww);

	if (igt_flush_test(engine->i915))

	i915_vma_unpin_and_release(&batch, 0);

	i915_vma_unpin_and_release(&scratch, 0);
static int live_dirty_whitelist(void *arg)
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Can the user write to the whitelisted registers? */

	if (GRAPHICS_VER(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		if (engine->whitelist.count == 0)

		ce = intel_context_create(engine);

		err = check_dirty_whitelist(ce);
		intel_context_put(ce);
static int live_reset_whitelist(void *arg)
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */
	igt_global_reset_lock(gt);

	for_each_engine(engine, gt, id) {
		if (engine->whitelist.count == 0)

		if (intel_has_reset_engine(gt)) {
			if (intel_engine_uses_guc(engine)) {
				struct intel_selftest_saved_policy saved;

				err = intel_selftest_modify_policy(engine, &saved,
								   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);

				err = check_whitelist_across_reset(engine,

				err2 = intel_selftest_restore_policy(engine, &saved);

				err = check_whitelist_across_reset(engine,

		if (intel_has_gpu_reset(gt)) {
			err = check_whitelist_across_reset(engine,

	igt_global_reset_unlock(gt);
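
/*
 * read_whitelisted_registers() - emit one SRM per whitelist entry so the
 * register values, as observed by this context, land in the results vma
 * (one u32 slot per entry, with the access flags stripped from the
 * offset first). Used by live_isolated_whitelist() below to snapshot a
 * context's view of the whitelisted registers.
 */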
static int read_whitelisted_registers(struct intel_context *ce,
				      struct i915_vma *results)
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq;

	rq = intel_context_create_request(ce);

	err = igt_vma_move_to_active_unlocked(results, rq, EXEC_OBJECT_WRITE);

	srm = MI_STORE_REGISTER_MEM;
	if (GRAPHICS_VER(engine->i915) >= 8)

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = i915_vma_offset(results) + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	intel_ring_advance(rq, cs);

	return request_add_sync(rq, err);
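
/*
 * scrub_whitelisted_registers() - build a batch containing a single
 * MI_LOAD_REGISTER_IMM block covering every writable whitelist entry
 * (read-only entries are skipped), run it as an unprivileged "user"
 * batch via emit_bb_start() and wait for it to complete.
 */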
static int scrub_whitelisted_registers(struct intel_context *ce)
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq;
	struct i915_vma *batch;

	batch = create_batch(ce->vm);
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);

	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = intel_context_create_request(ce);

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);

	err = igt_vma_move_to_active_unlocked(batch, rq, 0);

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, i915_vma_offset(batch), 0, 0);

	err = request_add_sync(rq, err);

	i915_gem_object_unpin_map(batch->obj);

	i915_vma_unpin_and_release(&batch, 0);
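
/*
 * find_reg() - look up a register in a small table of exceptions, where
 * each regmask entry pairs a register with the GRAPHICS_VER it applies to
 * (see the tbl->reg and tbl->graphics_ver comparisons below). Used by
 * pardon_reg() and writeonly_reg() to excuse known-misbehaving whitelist
 * entries.
 */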
static bool find_reg(struct drm_i915_private *i915,
		     const struct regmask *tbl,
	u32 offset = i915_mmio_reg_offset(reg);

		if (GRAPHICS_VER(i915) == tbl->graphics_ver &&
		    i915_mmio_reg_offset(tbl->reg) == offset)

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, 9 },
		{ _MMIO(0xb118), 9 }, /* GEN8_L3SQCREG4 */

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%04x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
	/* Some registers do not seem to behave and our writes are unreadable */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, 9 },

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%04x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);

check_whitelisted_registers(struct intel_engine_cs *engine,
			    bool (*fn)(struct intel_engine_cs *engine,
	a = i915_gem_object_pin_map_unlocked(A->obj, I915_MAP_WB);

	b = i915_gem_object_pin_map_unlocked(B->obj, I915_MAP_WB);

	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)

		if (!fn(engine, a[i], b[i], wa->reg))

	i915_gem_object_unpin_map(B->obj);

	i915_gem_object_unpin_map(A->obj);

static int live_isolated_whitelist(void *arg)
	struct intel_gt *gt = arg;
		struct i915_vma *scratch[2];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Check that a write into a whitelist register works, but is
	 * invisible to a second context.
	 */
	if (!intel_engines_has_context_isolation(gt->i915))

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		client[i].scratch[0] =
			__vm_create_scratch_for_read_pinned(gt->vm, 4096);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);

		client[i].scratch[1] =
			__vm_create_scratch_for_read_pinned(gt->vm, 4096);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce[2];

		if (!engine->kernel_context->vm)

		if (!whitelist_writable_count(engine))

		ce[0] = intel_context_create(engine);
		if (IS_ERR(ce[0])) {
			err = PTR_ERR(ce[0]);
		ce[1] = intel_context_create(engine);
		if (IS_ERR(ce[1])) {
			err = PTR_ERR(ce[1]);
			intel_context_put(ce[0]);

		/* Read default values */
		err = read_whitelisted_registers(ce[0], client[0].scratch[0]);

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(ce[0]);

		/* Read values from ctx1, we expect these to be defaults */
		err = read_whitelisted_registers(ce[1], client[1].scratch[0]);

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(ce[0], client[0].scratch[1]);
		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],

		intel_context_put(ce[1]);
		intel_context_put(ce[0]);

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);

	if (igt_flush_test(gt->i915))
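
/*
 * verify_wa_lists() - re-check the reference workaround lists captured by
 * reference_lists_init() against the hardware: the global GT list via
 * wa_list_verify(), and each engine's register and context workaround
 * lists via engine_wa_list_verify() on a temporary context. Returns true
 * only if every list still matches.
 */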
verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists,
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	ok &= wa_list_verify(gt, &lists->gt_wa_list, str);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = intel_context_create(engine);

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,

		intel_context_put(ce);
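
/*
 * live_gpu_reset_workarounds() - take a reference snapshot of the GT and
 * engine workaround lists, perform a full-chip reset of all engines, and
 * verify the workarounds are reapplied afterwards (returning -ESRCH if
 * either the before- or after-reset verification fails).
 */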
live_gpu_reset_workarounds(void *arg)
	struct intel_gt *gt = arg;
	intel_wakeref_t wakeref;
	struct wa_lists *lists;

	if (!intel_has_gpu_reset(gt))

	lists = kzalloc(sizeof(*lists), GFP_KERNEL);

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, lists);

	ok = verify_wa_lists(gt, lists, "before reset");

	intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(gt, lists, "after reset");

	reference_lists_fini(gt, lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	return ok ? 0 : -ESRCH;
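
/*
 * live_engine_reset_workarounds() - as live_gpu_reset_workarounds(), but
 * per engine: with the scheduler policy temporarily switched to fast
 * resets, verify the reference lists, reset the engine both while idle
 * and while it is running a spinner, and check the workaround lists
 * again after each reset.
 */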
live_engine_reset_workarounds(void *arg)
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct intel_context *ce;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists *lists;

	if (!intel_has_reset_engine(gt))

	lists = kzalloc(sizeof(*lists), GFP_KERNEL);

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, lists);

	for_each_engine(engine, gt, id) {
		struct intel_selftest_saved_policy saved;
		bool using_guc = intel_engine_uses_guc(engine);

		pr_info("Verifying after %s reset...\n", engine->name);
		ret = intel_selftest_modify_policy(engine, &saved,
						   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);

		ce = intel_context_create(engine);

		ok = verify_wa_lists(gt, lists, "before reset");

			ret = intel_engine_reset(engine, "live_workarounds:idle");
				pr_err("%s: Reset failed while idle\n", engine->name);

			ok = verify_wa_lists(gt, lists, "after idle reset");

		ret = igt_spinner_init(&spin, engine->gt);

		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
			igt_spinner_fini(&spin);

		ret = request_add_spin(rq, &spin);
			pr_err("%s: Spinner failed to start\n", engine->name);
			igt_spinner_fini(&spin);

		/* Ensure the spinner hasn't aborted */
		if (i915_request_completed(rq)) {

			ret = intel_engine_reset(engine, "live_workarounds:active");
				pr_err("%s: Reset failed on an active spinner\n",
				igt_spinner_fini(&spin);

		/* Ensure the reset happens and kills the engine */
			ret = intel_selftest_wait_for_rq(rq);

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(gt, lists, "after busy reset");

		intel_context_put(ce);

		ret2 = intel_selftest_restore_policy(engine, &saved);

	reference_lists_fini(gt, lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	igt_flush_test(gt->i915);
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),

	if (intel_gt_is_wedged(to_gt(i915)))

	return intel_gt_live_subtests(tests, to_gt(i915));