1 // SPDX-License-Identifier: MIT
3 * Copyright © 2018 Intel Corporation
6 #include <linux/prime_numbers.h>
8 #include "gem/i915_gem_internal.h"
9 #include "gem/i915_gem_pm.h"
10 #include "gt/intel_engine_heartbeat.h"
11 #include "gt/intel_reset.h"
12 #include "gt/selftest_engine_heartbeat.h"
14 #include "i915_selftest.h"
15 #include "selftests/i915_random.h"
16 #include "selftests/igt_flush_test.h"
17 #include "selftests/igt_live_test.h"
18 #include "selftests/igt_spinner.h"
19 #include "selftests/lib_sw_fence.h"
21 #include "gem/selftests/igt_gem_utils.h"
22 #include "gem/selftests/mock_context.h"
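/*
 * CS_GPR(engine, n) below is the MMIO address of dword n of the engine's
 * command streamer general purpose registers (mmio_base + 0x600 onwards);
 * each logical GPR is 64b wide, hence NUM_GPR_DW counts two dwords per GPR.
 * They are exercised by the MI_MATH/MI_SRM user-batch preemption tests later
 * in this file.
 */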
24 #define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
26 #define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
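/*
 * A request counts as "active" for wait_for_submit() if it is in the ELSP
 * (or parked on the hold list), or if it has demonstrably started executing
 * on the HW (its initial breadcrumb has already been passed).
 */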
28 static bool is_active(struct i915_request *rq)
30 if (i915_request_is_active(rq))
33 if (i915_request_on_hold(rq))
36 if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
42 static int wait_for_submit(struct intel_engine_cs *engine,
43 struct i915_request *rq,
44 unsigned long timeout)
46 /* Ignore our own attempts to suppress excess tasklets */
47 tasklet_hi_schedule(&engine->sched_engine->tasklet);
51 bool done = time_after(jiffies, timeout);
53 if (i915_request_completed(rq)) /* that was quick! */
56 /* Wait until the HW has acknowledged the submission (or err) */
57 intel_engine_flush_submission(engine);
58 if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
68 static int wait_for_reset(struct intel_engine_cs *engine,
69 struct i915_request *rq,
70 unsigned long timeout)
76 intel_engine_flush_submission(engine);
78 if (READ_ONCE(engine->execlists.pending[0]))
81 if (i915_request_completed(rq))
84 if (READ_ONCE(rq->fence.error))
86 } while (time_before(jiffies, timeout));
88 if (rq->fence.error != -EIO) {
89 pr_err("%s: hanging request %llx:%lld not reset\n",
96 /* Give the request a jiffy to complete after flushing the worker */
97 if (i915_request_wait(rq, 0,
98 max(0l, (long)(timeout - jiffies)) + 1) < 0) {
99 pr_err("%s: hanging request %llx:%lld did not complete\n",
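/*
 * Smoke test: on every engine, create a fresh context, submit a spinning
 * request, check that it actually starts executing, then terminate it. This
 * verifies basic context creation and execlists submission before the more
 * elaborate preemption tests below.
 */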
109 static int live_sanitycheck(void *arg)
111 struct intel_gt *gt = arg;
112 struct intel_engine_cs *engine;
113 enum intel_engine_id id;
114 struct igt_spinner spin;
117 if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
120 if (igt_spinner_init(&spin, gt))
123 for_each_engine(engine, gt, id) {
124 struct intel_context *ce;
125 struct i915_request *rq;
127 ce = intel_context_create(engine);
133 rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
139 i915_request_add(rq);
140 if (!igt_wait_for_spinner(&spin, rq)) {
141 GEM_TRACE("spinner failed to start\n");
143 intel_gt_set_wedged(gt);
148 igt_spinner_end(&spin);
149 if (igt_flush_test(gt->i915)) {
155 intel_context_put(ce);
160 igt_spinner_fini(&spin);
164 static int live_unlite_restore(struct intel_gt *gt, int prio)
166 struct intel_engine_cs *engine;
167 enum intel_engine_id id;
168 struct igt_spinner spin;
172 * Check that we can correctly context switch between 2 instances
173 * on the same engine from the same parent context.
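 *
 * (A "lite restore" is the HW resubmitting the context it is already
 * running, sampling just the updated RING_TAIL rather than performing a
 * full context restore.)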
176 if (igt_spinner_init(&spin, gt))
180 for_each_engine(engine, gt, id) {
181 struct intel_context *ce[2] = {};
182 struct i915_request *rq[2];
183 struct igt_live_test t;
186 if (prio && !intel_engine_has_preemption(engine))
189 if (!intel_engine_can_store_dword(engine))
192 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
196 st_engine_heartbeat_disable(engine);
198 for (n = 0; n < ARRAY_SIZE(ce); n++) {
199 struct intel_context *tmp;
201 tmp = intel_context_create(engine);
207 err = intel_context_pin(tmp);
209 intel_context_put(tmp);
214 * Set up the pair of contexts such that if we
215 * lite-restore using the RING_TAIL from ce[1] it
216 * will execute garbage from ce[0]->ring.
218 memset(tmp->ring->vaddr,
219 POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */
220 tmp->ring->vma->size);
224 GEM_BUG_ON(!ce[1]->ring->size);
225 intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
226 lrc_update_regs(ce[1], engine, ce[1]->ring->head);
228 rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
230 err = PTR_ERR(rq[0]);
234 i915_request_get(rq[0]);
235 i915_request_add(rq[0]);
236 GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
238 if (!igt_wait_for_spinner(&spin, rq[0])) {
239 i915_request_put(rq[0]);
243 rq[1] = i915_request_create(ce[1]);
245 err = PTR_ERR(rq[1]);
246 i915_request_put(rq[0]);
252 * Ensure we do the switch to ce[1] on completion.
254 * rq[0] is already submitted, so this should reduce
255 * to a no-op (a wait on a request on the same engine
256 * uses the submit fence, not the completion fence),
257 * but it will install a dependency on rq[1] for rq[0]
258 * that will prevent the pair being reordered by
261 i915_request_await_dma_fence(rq[1], &rq[0]->fence);
264 i915_request_get(rq[1]);
265 i915_request_add(rq[1]);
266 GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix);
267 i915_request_put(rq[0]);
270 struct i915_sched_attr attr = {
274 /* Alternatively preempt the spinner with ce[1] */
275 engine->sched_engine->schedule(rq[1], &attr);
278 /* And switch back to ce[0] for good measure */
279 rq[0] = i915_request_create(ce[0]);
281 err = PTR_ERR(rq[0]);
282 i915_request_put(rq[1]);
286 i915_request_await_dma_fence(rq[0], &rq[1]->fence);
287 i915_request_get(rq[0]);
288 i915_request_add(rq[0]);
289 GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix);
290 i915_request_put(rq[1]);
291 i915_request_put(rq[0]);
294 intel_engine_flush_submission(engine);
295 igt_spinner_end(&spin);
296 for (n = 0; n < ARRAY_SIZE(ce); n++) {
297 if (IS_ERR_OR_NULL(ce[n]))
300 intel_context_unpin(ce[n]);
301 intel_context_put(ce[n]);
304 st_engine_heartbeat_enable(engine);
305 if (igt_live_test_end(&t))
311 igt_spinner_fini(&spin);
315 static int live_unlite_switch(void *arg)
317 return live_unlite_restore(arg, 0);
320 static int live_unlite_preempt(void *arg)
322 return live_unlite_restore(arg, I915_PRIORITY_MAX);
325 static int live_unlite_ring(void *arg)
327 struct intel_gt *gt = arg;
328 struct intel_engine_cs *engine;
329 struct igt_spinner spin;
330 enum intel_engine_id id;
334 * Set up a preemption event that will cause almost the entire ring
335 * to be unwound, potentially fooling our intel_ring_direction()
336 * into emitting a forward lite-restore instead of the rollback.
339 if (igt_spinner_init(&spin, gt))
342 for_each_engine(engine, gt, id) {
343 struct intel_context *ce[2] = {};
344 struct i915_request *rq;
345 struct igt_live_test t;
348 if (!intel_engine_has_preemption(engine))
351 if (!intel_engine_can_store_dword(engine))
354 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
358 st_engine_heartbeat_disable(engine);
360 for (n = 0; n < ARRAY_SIZE(ce); n++) {
361 struct intel_context *tmp;
363 tmp = intel_context_create(engine);
369 err = intel_context_pin(tmp);
371 intel_context_put(tmp);
375 memset32(tmp->ring->vaddr,
376 0xdeadbeef, /* trigger a hang if executed */
377 tmp->ring->vma->size / sizeof(u32));
382 /* Create max prio spinner, followed by N low prio nops */
383 rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
389 i915_request_get(rq);
390 rq->sched.attr.priority = I915_PRIORITY_BARRIER;
391 i915_request_add(rq);
393 if (!igt_wait_for_spinner(&spin, rq)) {
394 intel_gt_set_wedged(gt);
395 i915_request_put(rq);
400 /* Fill the ring, until we cause a wrap */
402 while (intel_ring_direction(ce[0]->ring,
404 ce[0]->ring->tail) <= 0) {
405 struct i915_request *tmp;
407 tmp = intel_context_create_request(ce[0]);
410 i915_request_put(rq);
414 i915_request_add(tmp);
415 intel_engine_flush_submission(engine);
418 intel_engine_flush_submission(engine);
419 pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
425 GEM_BUG_ON(intel_ring_direction(ce[0]->ring,
427 ce[0]->ring->tail) <= 0);
428 i915_request_put(rq);
430 /* Create a second ring to preempt the first ring after rq[0] */
431 rq = intel_context_create_request(ce[1]);
437 rq->sched.attr.priority = I915_PRIORITY_BARRIER;
438 i915_request_get(rq);
439 i915_request_add(rq);
441 err = wait_for_submit(engine, rq, HZ / 2);
442 i915_request_put(rq);
444 pr_err("%s: preemption request was not submitted\n",
449 pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
451 ce[0]->ring->tail, ce[0]->ring->emit,
452 ce[1]->ring->tail, ce[1]->ring->emit);
455 intel_engine_flush_submission(engine);
456 igt_spinner_end(&spin);
457 for (n = 0; n < ARRAY_SIZE(ce); n++) {
458 if (IS_ERR_OR_NULL(ce[n]))
461 intel_context_unpin(ce[n]);
462 intel_context_put(ce[n]);
464 st_engine_heartbeat_enable(engine);
465 if (igt_live_test_end(&t))
471 igt_spinner_fini(&spin);
475 static int live_pin_rewind(void *arg)
477 struct intel_gt *gt = arg;
478 struct intel_engine_cs *engine;
479 enum intel_engine_id id;
483 * We have to be careful not to trust intel_ring too much; for example,
484 * ring->head is updated upon retire, which is out of sync with pinning
485 * the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
486 * or else we risk writing an older, stale value.
488 * To simulate this, let's apply a bit of deliberate sabotage.
491 for_each_engine(engine, gt, id) {
492 struct intel_context *ce;
493 struct i915_request *rq;
494 struct intel_ring *ring;
495 struct igt_live_test t;
497 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
502 ce = intel_context_create(engine);
508 err = intel_context_pin(ce);
510 intel_context_put(ce);
514 /* Keep the context awake while we play games */
515 err = i915_active_acquire(&ce->active);
517 intel_context_unpin(ce);
518 intel_context_put(ce);
523 /* Poison the ring, and offset the next request from HEAD */
524 memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
525 ring->emit = ring->size / 2;
526 ring->tail = ring->emit;
527 GEM_BUG_ON(ring->head);
529 intel_context_unpin(ce);
531 /* Submit a simple nop request */
532 GEM_BUG_ON(intel_context_is_pinned(ce));
533 rq = intel_context_create_request(ce);
534 i915_active_release(&ce->active); /* e.g. async retire */
535 intel_context_put(ce);
540 GEM_BUG_ON(!rq->head);
541 i915_request_add(rq);
543 /* Expect not to hang! */
544 if (igt_live_test_end(&t)) {
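/*
 * Take manual control of the engine for a reset: disable the submission
 * tasklet and claim the per-engine reset bit, so nothing else resets (or
 * submits to) the engine while the test pokes at the execlists state.
 */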
553 static int engine_lock_reset_tasklet(struct intel_engine_cs *engine)
555 tasklet_disable(&engine->sched_engine->tasklet);
558 if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
559 &engine->gt->reset.flags)) {
561 tasklet_enable(&engine->sched_engine->tasklet);
563 intel_gt_set_wedged(engine->gt);
570 static void engine_unlock_reset_tasklet(struct intel_engine_cs *engine)
572 clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
573 &engine->gt->reset.flags);
576 tasklet_enable(&engine->sched_engine->tasklet);
579 static int live_hold_reset(void *arg)
581 struct intel_gt *gt = arg;
582 struct intel_engine_cs *engine;
583 enum intel_engine_id id;
584 struct igt_spinner spin;
588 * In order to support offline error capture for fast preempt reset,
589 * we need to decouple the guilty request and ensure that it and its
590 * descendants are not executed while the capture is in progress.
593 if (!intel_has_reset_engine(gt))
596 if (igt_spinner_init(&spin, gt))
599 for_each_engine(engine, gt, id) {
600 struct intel_context *ce;
601 struct i915_request *rq;
603 ce = intel_context_create(engine);
609 st_engine_heartbeat_disable(engine);
611 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
616 i915_request_add(rq);
618 if (!igt_wait_for_spinner(&spin, rq)) {
619 intel_gt_set_wedged(gt);
624 /* We have our request executing, now remove it and reset */
626 err = engine_lock_reset_tasklet(engine);
630 engine->sched_engine->tasklet.callback(&engine->sched_engine->tasklet);
631 GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
633 i915_request_get(rq);
634 execlists_hold(engine, rq);
635 GEM_BUG_ON(!i915_request_on_hold(rq));
637 __intel_engine_reset_bh(engine, NULL);
638 GEM_BUG_ON(rq->fence.error != -EIO);
640 engine_unlock_reset_tasklet(engine);
642 /* Check that we do not resubmit the held request */
643 if (!i915_request_wait(rq, 0, HZ / 5)) {
644 pr_err("%s: on hold request completed!\n",
646 i915_request_put(rq);
650 GEM_BUG_ON(!i915_request_on_hold(rq));
652 /* But is resubmitted on release */
653 execlists_unhold(engine, rq);
654 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
655 pr_err("%s: held request did not complete!\n",
657 intel_gt_set_wedged(gt);
660 i915_request_put(rq);
663 st_engine_heartbeat_enable(engine);
664 intel_context_put(ce);
669 igt_spinner_fini(&spin);
673 static const char *error_repr(int err)
675 return err ? "bad" : "good";
678 static int live_error_interrupt(void *arg)
680 static const struct error_phase {
681 enum { GOOD = 0, BAD = -EIO } error[2];
686 { { GOOD, GOOD } }, /* sentinel */
688 struct intel_gt *gt = arg;
689 struct intel_engine_cs *engine;
690 enum intel_engine_id id;
693 * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning
694 * of invalid commands in user batches that will cause a GPU hang.
695 * This is a faster mechanism than using hangcheck/heartbeats, but
696 * only detects problems the HW knows about -- it will not warn when
699 * To verify our detection and reset, we throw some invalid commands
700 * at the HW and wait for the interrupt.
703 if (!intel_has_reset_engine(gt))
706 for_each_engine(engine, gt, id) {
707 const struct error_phase *p;
710 st_engine_heartbeat_disable(engine);
712 for (p = phases; p->error[0] != GOOD; p++) {
713 struct i915_request *client[ARRAY_SIZE(phases->error)];
717 memset(client, 0, sizeof(*client));
718 for (i = 0; i < ARRAY_SIZE(client); i++) {
719 struct intel_context *ce;
720 struct i915_request *rq;
722 ce = intel_context_create(engine);
728 rq = intel_context_create_request(ce);
729 intel_context_put(ce);
735 if (rq->engine->emit_init_breadcrumb) {
736 err = rq->engine->emit_init_breadcrumb(rq);
738 i915_request_add(rq);
743 cs = intel_ring_begin(rq, 2);
745 i915_request_add(rq);
758 client[i] = i915_request_get(rq);
759 i915_request_add(rq);
762 err = wait_for_submit(engine, client[0], HZ / 2);
764 pr_err("%s: first request did not start within time!\n",
770 for (i = 0; i < ARRAY_SIZE(client); i++) {
771 if (i915_request_wait(client[i], 0, HZ / 5) < 0)
772 pr_debug("%s: %s request incomplete!\n",
774 error_repr(p->error[i]));
776 if (!i915_request_started(client[i])) {
777 pr_err("%s: %s request not started!\n",
779 error_repr(p->error[i]));
784 /* Kick the tasklet to process the error */
785 intel_engine_flush_submission(engine);
786 if (client[i]->fence.error != p->error[i]) {
787 pr_err("%s: %s request (%s) with wrong error code: %d\n",
789 error_repr(p->error[i]),
790 i915_request_completed(client[i]) ? "completed" : "running",
791 client[i]->fence.error);
798 for (i = 0; i < ARRAY_SIZE(client); i++)
800 i915_request_put(client[i]);
802 pr_err("%s: failed at phase[%zd] { %d, %d }\n",
803 engine->name, p - phases,
804 p->error[0], p->error[1]);
809 st_engine_heartbeat_enable(engine);
811 intel_gt_set_wedged(gt);
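/*
 * Each link of the semaphore chain busy-waits (with arbitration enabled, so
 * it remains preemptible) until slot[idx] becomes non-zero, and then signals
 * slot[idx - 1]. Releasing the final slot therefore unwinds the whole chain
 * back down to the head request.
 */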
820 emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
824 cs = intel_ring_begin(rq, 10);
828 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
830 *cs++ = MI_SEMAPHORE_WAIT |
831 MI_SEMAPHORE_GLOBAL_GTT |
833 MI_SEMAPHORE_SAD_NEQ_SDD;
835 *cs++ = i915_ggtt_offset(vma) + 4 * idx;
839 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
840 *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
850 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
852 intel_ring_advance(rq, cs);
856 static struct i915_request *
857 semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
859 struct intel_context *ce;
860 struct i915_request *rq;
863 ce = intel_context_create(engine);
867 rq = intel_context_create_request(ce);
872 if (rq->engine->emit_init_breadcrumb)
873 err = rq->engine->emit_init_breadcrumb(rq);
875 err = emit_semaphore_chain(rq, vma, idx);
877 i915_request_get(rq);
878 i915_request_add(rq);
883 intel_context_put(ce);
888 release_queue(struct intel_engine_cs *engine,
889 struct i915_vma *vma,
892 struct i915_sched_attr attr = {
895 struct i915_request *rq;
898 rq = intel_engine_create_kernel_request(engine);
902 cs = intel_ring_begin(rq, 4);
904 i915_request_add(rq);
908 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
909 *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
913 intel_ring_advance(rq, cs);
915 i915_request_get(rq);
916 i915_request_add(rq);
919 engine->sched_engine->schedule(rq, &attr);
920 local_bh_enable(); /* kick tasklet */
922 i915_request_put(rq);
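/*
 * Queue a chain of semaphore waiters across all engines, headed by a request
 * on @outer, then release the tail of the chain at maximum priority. The
 * head can only complete if the scheduler timeslices between the
 * semaphore-blocked requests sharing each engine, letting each link signal
 * its predecessor.
 */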
928 slice_semaphore_queue(struct intel_engine_cs *outer,
929 struct i915_vma *vma,
932 struct intel_engine_cs *engine;
933 struct i915_request *head;
934 enum intel_engine_id id;
937 head = semaphore_queue(outer, vma, n++);
939 return PTR_ERR(head);
941 for_each_engine(engine, outer->gt, id) {
942 if (!intel_engine_has_preemption(engine))
945 for (i = 0; i < count; i++) {
946 struct i915_request *rq;
948 rq = semaphore_queue(engine, vma, n++);
954 i915_request_put(rq);
958 err = release_queue(outer, vma, n, I915_PRIORITY_BARRIER);
962 if (i915_request_wait(head, 0,
963 2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) {
964 pr_err("%s: Failed to slice along semaphore chain of length (%d, %d)!\n",
965 outer->name, count, n);
967 intel_gt_set_wedged(outer->gt);
972 i915_request_put(head);
976 static int live_timeslice_preempt(void *arg)
978 struct intel_gt *gt = arg;
979 struct drm_i915_gem_object *obj;
980 struct intel_engine_cs *engine;
981 enum intel_engine_id id;
982 struct i915_vma *vma;
987 * If a request takes too long, we would like to give other users
988 * a fair go on the GPU. In particular, users may create batches
989 * that wait upon external input, where that input may even be
990 * supplied by another GPU job. To avoid blocking forever, we
991 * need to preempt the current task and replace it with another
994 if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
997 obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
1001 vma = i915_vma_instance(obj, >->ggtt->vm, NULL);
1007 vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
1008 if (IS_ERR(vaddr)) {
1009 err = PTR_ERR(vaddr);
1013 err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
1017 err = i915_vma_sync(vma);
1021 for_each_engine(engine, gt, id) {
1022 if (!intel_engine_has_preemption(engine))
1025 memset(vaddr, 0, PAGE_SIZE);
1027 st_engine_heartbeat_disable(engine);
1028 err = slice_semaphore_queue(engine, vma, 5);
1029 st_engine_heartbeat_enable(engine);
1033 if (igt_flush_test(gt->i915)) {
1040 i915_vma_unpin(vma);
1042 i915_gem_object_unpin_map(obj);
1044 i915_gem_object_put(obj);
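/*
 * create_rewinder() emits a request that (optionally after awaiting @wait)
 * blocks on the semaphore in @slot and then records the engine's
 * RING_TIMESTAMP into slot[idx]. live_timeslice_rewind() compares the
 * captured timestamps to deduce the order in which the requests really ran.
 */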
1048 static struct i915_request *
1049 create_rewinder(struct intel_context *ce,
1050 struct i915_request *wait,
1051 void *slot, int idx)
1054 i915_ggtt_offset(ce->engine->status_page.vma) +
1055 offset_in_page(slot);
1056 struct i915_request *rq;
1060 rq = intel_context_create_request(ce);
1065 err = i915_request_await_dma_fence(rq, &wait->fence);
1070 cs = intel_ring_begin(rq, 14);
1076 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
1079 *cs++ = MI_SEMAPHORE_WAIT |
1080 MI_SEMAPHORE_GLOBAL_GTT |
1082 MI_SEMAPHORE_SAD_GTE_SDD;
1087 *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
1088 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
1089 *cs++ = offset + idx * sizeof(u32);
1092 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1097 intel_ring_advance(rq, cs);
1101 i915_request_get(rq);
1102 i915_request_add(rq);
1104 i915_request_put(rq);
1105 return ERR_PTR(err);
1111 static int live_timeslice_rewind(void *arg)
1113 struct intel_gt *gt = arg;
1114 struct intel_engine_cs *engine;
1115 enum intel_engine_id id;
1118 * The usual presumption on timeslice expiration is that we replace
1119 * the active context with another. However, given a chain of
1120 * dependencies we may end up replacing the context with itself,
1121 * but with only a few of its requests, forcing us to rewind the
1122 * RING_TAIL of the original request.
1124 if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
1127 for_each_engine(engine, gt, id) {
1128 enum { A1, A2, B1 };
1129 enum { X = 1, Z, Y };
1130 struct i915_request *rq[3] = {};
1131 struct intel_context *ce;
1132 unsigned long timeslice;
1136 if (!intel_engine_has_timeslices(engine))
1140 * A:rq1 -- semaphore wait, timestamp X
1141 * A:rq2 -- write timestamp Y
1143 * B:rq1 [await A:rq1] -- write timestamp Z
1145 * Force timeslice, release semaphore.
1147 * Expect execution/evaluation order XZY
1150 st_engine_heartbeat_disable(engine);
1151 timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
1153 slot = memset32(engine->status_page.addr + 1000, 0, 4);
1155 ce = intel_context_create(engine);
1161 rq[A1] = create_rewinder(ce, NULL, slot, X);
1162 if (IS_ERR(rq[A1])) {
1163 intel_context_put(ce);
1167 rq[A2] = create_rewinder(ce, NULL, slot, Y);
1168 intel_context_put(ce);
1172 err = wait_for_submit(engine, rq[A2], HZ / 2);
1174 pr_err("%s: failed to submit first context\n",
1179 ce = intel_context_create(engine);
1185 rq[B1] = create_rewinder(ce, rq[A1], slot, Z);
1186 intel_context_put(ce);
1190 err = wait_for_submit(engine, rq[B1], HZ / 2);
1192 pr_err("%s: failed to submit second context\n",
1197 /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
1198 ENGINE_TRACE(engine, "forcing tasklet for rewind\n");
1199 while (i915_request_is_active(rq[A2])) { /* semaphore yield! */
1200 /* Wait for the timeslice to kick in */
1201 del_timer(&engine->execlists.timer);
1202 tasklet_hi_schedule(&engine->sched_engine->tasklet);
1203 intel_engine_flush_submission(engine);
1205 /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
1206 GEM_BUG_ON(!i915_request_is_active(rq[A1]));
1207 GEM_BUG_ON(!i915_request_is_active(rq[B1]));
1208 GEM_BUG_ON(i915_request_is_active(rq[A2]));
1210 /* Release the hounds! */
1212 wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */
1214 for (i = 1; i <= 3; i++) {
1215 unsigned long timeout = jiffies + HZ / 2;
1217 while (!READ_ONCE(slot[i]) &&
1218 time_before(jiffies, timeout))
1221 if (!time_before(jiffies, timeout)) {
1222 pr_err("%s: rq[%d] timed out\n",
1223 engine->name, i - 1);
1228 pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]);
1232 if (slot[Z] - slot[X] >= slot[Y] - slot[X]) {
1233 pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n",
1241 memset32(&slot[0], -1, 4);
1244 engine->props.timeslice_duration_ms = timeslice;
1245 st_engine_heartbeat_enable(engine);
1246 for (i = 0; i < 3; i++)
1247 i915_request_put(rq[i]);
1248 if (igt_flush_test(gt->i915))
1257 static struct i915_request *nop_request(struct intel_engine_cs *engine)
1259 struct i915_request *rq;
1261 rq = intel_engine_create_kernel_request(engine);
1265 i915_request_get(rq);
1266 i915_request_add(rq);
1271 static long slice_timeout(struct intel_engine_cs *engine)
1275 /* Enough time for a timeslice to kick in, and kick out */
1276 timeout = 2 * msecs_to_jiffies_timeout(timeslice(engine));
1278 /* Enough time for the nop request to complete */
1284 static int live_timeslice_queue(void *arg)
1286 struct intel_gt *gt = arg;
1287 struct drm_i915_gem_object *obj;
1288 struct intel_engine_cs *engine;
1289 enum intel_engine_id id;
1290 struct i915_vma *vma;
1295 * Make sure that even if ELSP[0] and ELSP[1] are filled, with
1296 * timeslicing between them disabled, we *do* enable timeslicing
1297 * if the queue demands it. (Normally, we do not submit if
1298 * ELSP[1] is already occupied, so must rely on timeslicing to
1299 * eject ELSP[0] in favour of the queue.)
1301 if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
1304 obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
1306 return PTR_ERR(obj);
1308 vma = i915_vma_instance(obj, >->ggtt->vm, NULL);
1314 vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
1315 if (IS_ERR(vaddr)) {
1316 err = PTR_ERR(vaddr);
1320 err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
1324 err = i915_vma_sync(vma);
1328 for_each_engine(engine, gt, id) {
1329 struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
1330 struct i915_request *rq, *nop;
1332 if (!intel_engine_has_preemption(engine))
1335 st_engine_heartbeat_disable(engine);
1336 memset(vaddr, 0, PAGE_SIZE);
1338 /* ELSP[0]: semaphore wait */
1339 rq = semaphore_queue(engine, vma, 0);
1344 engine->sched_engine->schedule(rq, &attr);
1345 err = wait_for_submit(engine, rq, HZ / 2);
1347 pr_err("%s: Timed out trying to submit semaphores\n",
1352 /* ELSP[1]: nop request */
1353 nop = nop_request(engine);
1358 err = wait_for_submit(engine, nop, HZ / 2);
1359 i915_request_put(nop);
1361 pr_err("%s: Timed out trying to submit nop\n",
1366 GEM_BUG_ON(i915_request_completed(rq));
1367 GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
1369 /* Queue: semaphore signal, matching priority as semaphore */
1370 err = release_queue(engine, vma, 1, effective_prio(rq));
1374 /* Wait until we ack the release_queue and start timeslicing */
1377 intel_engine_flush_submission(engine);
1378 } while (READ_ONCE(engine->execlists.pending[0]));
1380 /* Timeslice every jiffy, so within 2 we should signal */
1381 if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
1382 struct drm_printer p =
1383 drm_info_printer(gt->i915->drm.dev);
1385 pr_err("%s: Failed to timeslice into queue\n",
1387 intel_engine_dump(engine, &p,
1388 "%s\n", engine->name);
1390 memset(vaddr, 0xff, PAGE_SIZE);
1394 i915_request_put(rq);
1396 st_engine_heartbeat_enable(engine);
1402 i915_vma_unpin(vma);
1404 i915_gem_object_unpin_map(obj);
1406 i915_gem_object_put(obj);
1410 static int live_timeslice_nopreempt(void *arg)
1412 struct intel_gt *gt = arg;
1413 struct intel_engine_cs *engine;
1414 enum intel_engine_id id;
1415 struct igt_spinner spin;
1419 * We should not timeslice into a request that is marked with
1420 * I915_FENCE_FLAG_NOPREEMPT.
1422 if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
1425 if (igt_spinner_init(&spin, gt))
1428 for_each_engine(engine, gt, id) {
1429 struct intel_context *ce;
1430 struct i915_request *rq;
1431 unsigned long timeslice;
1433 if (!intel_engine_has_preemption(engine))
1436 ce = intel_context_create(engine);
1442 st_engine_heartbeat_disable(engine);
1443 timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
1445 /* Create an unpreemptible spinner */
1447 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
1448 intel_context_put(ce);
1454 i915_request_get(rq);
1455 i915_request_add(rq);
1457 if (!igt_wait_for_spinner(&spin, rq)) {
1458 i915_request_put(rq);
1463 set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
1464 i915_request_put(rq);
1466 /* Followed by a maximum priority barrier (heartbeat) */
1468 ce = intel_context_create(engine);
1474 rq = intel_context_create_request(ce);
1475 intel_context_put(ce);
1481 rq->sched.attr.priority = I915_PRIORITY_BARRIER;
1482 i915_request_get(rq);
1483 i915_request_add(rq);
1486 * Wait until the barrier is in ELSP, and we know timeslicing
1487 * will have been activated.
1489 if (wait_for_submit(engine, rq, HZ / 2)) {
1490 i915_request_put(rq);
1496 * Since the ELSP[0] request is unpreemptible, it should not
1497 * allow the maximum priority barrier through. Wait long
1498 * enough to see if it is timesliced in by mistake.
1500 if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) {
1501 pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n",
1505 i915_request_put(rq);
1508 igt_spinner_end(&spin);
1510 xchg(&engine->props.timeslice_duration_ms, timeslice);
1511 st_engine_heartbeat_enable(engine);
1515 if (igt_flush_test(gt->i915)) {
1521 igt_spinner_fini(&spin);
1525 static int live_busywait_preempt(void *arg)
1527 struct intel_gt *gt = arg;
1528 struct i915_gem_context *ctx_hi, *ctx_lo;
1529 struct intel_engine_cs *engine;
1530 struct drm_i915_gem_object *obj;
1531 struct i915_vma *vma;
1532 enum intel_engine_id id;
1537 * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
1538 * preempt the busywaits used to synchronise between rings.
1541 ctx_hi = kernel_context(gt->i915, NULL);
1543 return PTR_ERR(ctx_hi);
1545 ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
1547 ctx_lo = kernel_context(gt->i915, NULL);
1548 if (IS_ERR(ctx_lo)) {
1549 err = PTR_ERR(ctx_lo);
1553 ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
1555 obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
1561 map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
1567 vma = i915_vma_instance(obj, >->ggtt->vm, NULL);
1573 err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
1577 err = i915_vma_sync(vma);
1581 for_each_engine(engine, gt, id) {
1582 struct i915_request *lo, *hi;
1583 struct igt_live_test t;
1586 if (!intel_engine_has_preemption(engine))
1589 if (!intel_engine_can_store_dword(engine))
1592 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
1598 * We create two requests. The low priority request
1599 * busywaits on a semaphore (inside the ringbuffer where
1600 * it should be preemptible) and the high priority request
1601 * uses a MI_STORE_DWORD_IMM to update the semaphore value
1602 * allowing the first request to complete. If preemption
1603 * fails, we hang instead.
1606 lo = igt_request_alloc(ctx_lo, engine);
1612 cs = intel_ring_begin(lo, 8);
1615 i915_request_add(lo);
1619 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1620 *cs++ = i915_ggtt_offset(vma);
1624 /* XXX Do we need a flush + invalidate here? */
1626 *cs++ = MI_SEMAPHORE_WAIT |
1627 MI_SEMAPHORE_GLOBAL_GTT |
1629 MI_SEMAPHORE_SAD_EQ_SDD;
1631 *cs++ = i915_ggtt_offset(vma);
1634 intel_ring_advance(lo, cs);
1636 i915_request_get(lo);
1637 i915_request_add(lo);
1639 if (wait_for(READ_ONCE(*map), 10)) {
1640 i915_request_put(lo);
1645 /* Low priority request should be busywaiting now */
1646 if (i915_request_wait(lo, 0, 1) != -ETIME) {
1647 i915_request_put(lo);
1648 pr_err("%s: Busywaiting request did not!\n",
1654 hi = igt_request_alloc(ctx_hi, engine);
1657 i915_request_put(lo);
1661 cs = intel_ring_begin(hi, 4);
1664 i915_request_add(hi);
1665 i915_request_put(lo);
1669 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1670 *cs++ = i915_ggtt_offset(vma);
1674 intel_ring_advance(hi, cs);
1675 i915_request_add(hi);
1677 if (i915_request_wait(lo, 0, HZ / 5) < 0) {
1678 struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
1680 pr_err("%s: Failed to preempt semaphore busywait!\n",
1683 intel_engine_dump(engine, &p, "%s\n", engine->name);
1686 i915_request_put(lo);
1687 intel_gt_set_wedged(gt);
1691 GEM_BUG_ON(READ_ONCE(*map));
1692 i915_request_put(lo);
1694 if (igt_live_test_end(&t)) {
1702 i915_vma_unpin(vma);
1704 i915_gem_object_unpin_map(obj);
1706 i915_gem_object_put(obj);
1708 kernel_context_close(ctx_lo);
1710 kernel_context_close(ctx_hi);
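/*
 * Convenience wrapper for the context-based tests below: look up @engine's
 * intel_context within @ctx and build an igt_spinner request on it.
 */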
1714 static struct i915_request *
1715 spinner_create_request(struct igt_spinner *spin,
1716 struct i915_gem_context *ctx,
1717 struct intel_engine_cs *engine,
1720 struct intel_context *ce;
1721 struct i915_request *rq;
1723 ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
1725 return ERR_CAST(ce);
1727 rq = igt_spinner_create_request(spin, ce, arb);
1728 intel_context_put(ce);
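/*
 * Basic preemption: start a spinner in a minimum user priority context, then
 * submit a spinner from a maximum user priority context and check that the
 * latter starts running, i.e. that it preempted the still-spinning low
 * priority request.
 */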
1732 static int live_preempt(void *arg)
1734 struct intel_gt *gt = arg;
1735 struct i915_gem_context *ctx_hi, *ctx_lo;
1736 struct igt_spinner spin_hi, spin_lo;
1737 struct intel_engine_cs *engine;
1738 enum intel_engine_id id;
1741 ctx_hi = kernel_context(gt->i915, NULL);
1744 ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
1746 ctx_lo = kernel_context(gt->i915, NULL);
1749 ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
1751 if (igt_spinner_init(&spin_hi, gt))
1754 if (igt_spinner_init(&spin_lo, gt))
1757 for_each_engine(engine, gt, id) {
1758 struct igt_live_test t;
1759 struct i915_request *rq;
1761 if (!intel_engine_has_preemption(engine))
1764 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
1769 rq = spinner_create_request(&spin_lo, ctx_lo, engine,
1776 i915_request_add(rq);
1777 if (!igt_wait_for_spinner(&spin_lo, rq)) {
1778 GEM_TRACE("lo spinner failed to start\n");
1780 intel_gt_set_wedged(gt);
1785 rq = spinner_create_request(&spin_hi, ctx_hi, engine,
1788 igt_spinner_end(&spin_lo);
1793 i915_request_add(rq);
1794 if (!igt_wait_for_spinner(&spin_hi, rq)) {
1795 GEM_TRACE("hi spinner failed to start\n");
1797 intel_gt_set_wedged(gt);
1802 igt_spinner_end(&spin_hi);
1803 igt_spinner_end(&spin_lo);
1805 if (igt_live_test_end(&t)) {
1813 igt_spinner_fini(&spin_lo);
1815 igt_spinner_fini(&spin_hi);
1817 kernel_context_close(ctx_lo);
1819 kernel_context_close(ctx_hi);
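/*
 * Check that a priority boost applied after submission still causes
 * preemption: start a low priority spinner, submit a second spinner that
 * initially has no priority advantage and verify it does not overtake, then
 * raise its priority to maximum and expect it to preempt.
 */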
1823 static int live_late_preempt(void *arg)
1825 struct intel_gt *gt = arg;
1826 struct i915_gem_context *ctx_hi, *ctx_lo;
1827 struct igt_spinner spin_hi, spin_lo;
1828 struct intel_engine_cs *engine;
1829 struct i915_sched_attr attr = {};
1830 enum intel_engine_id id;
1833 ctx_hi = kernel_context(gt->i915, NULL);
1837 ctx_lo = kernel_context(gt->i915, NULL);
1841 if (igt_spinner_init(&spin_hi, gt))
1844 if (igt_spinner_init(&spin_lo, gt))
1847 /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
1848 ctx_lo->sched.priority = 1;
1850 for_each_engine(engine, gt, id) {
1851 struct igt_live_test t;
1852 struct i915_request *rq;
1854 if (!intel_engine_has_preemption(engine))
1857 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
1862 rq = spinner_create_request(&spin_lo, ctx_lo, engine,
1869 i915_request_add(rq);
1870 if (!igt_wait_for_spinner(&spin_lo, rq)) {
1871 pr_err("First context failed to start\n");
1875 rq = spinner_create_request(&spin_hi, ctx_hi, engine,
1878 igt_spinner_end(&spin_lo);
1883 i915_request_add(rq);
1884 if (igt_wait_for_spinner(&spin_hi, rq)) {
1885 pr_err("Second context overtook first?\n");
1889 attr.priority = I915_PRIORITY_MAX;
1890 engine->sched_engine->schedule(rq, &attr);
1892 if (!igt_wait_for_spinner(&spin_hi, rq)) {
1893 pr_err("High priority context failed to preempt the low priority context\n");
1898 igt_spinner_end(&spin_hi);
1899 igt_spinner_end(&spin_lo);
1901 if (igt_live_test_end(&t)) {
1909 igt_spinner_fini(&spin_lo);
1911 igt_spinner_fini(&spin_hi);
1913 kernel_context_close(ctx_lo);
1915 kernel_context_close(ctx_hi);
1919 igt_spinner_end(&spin_hi);
1920 igt_spinner_end(&spin_lo);
1921 intel_gt_set_wedged(gt);
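/*
 * A preempt_client bundles an igt_spinner with its own GEM context, so each
 * test can submit long-running spinning batches under per-context priorities.
 */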
1926 struct preempt_client {
1927 struct igt_spinner spin;
1928 struct i915_gem_context *ctx;
1931 static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c)
1933 c->ctx = kernel_context(gt->i915, NULL);
1937 if (igt_spinner_init(&c->spin, gt))
1943 kernel_context_close(c->ctx);
1947 static void preempt_client_fini(struct preempt_client *c)
1949 igt_spinner_fini(&c->spin);
1950 kernel_context_close(c->ctx);
1953 static int live_nopreempt(void *arg)
1955 struct intel_gt *gt = arg;
1956 struct intel_engine_cs *engine;
1957 struct preempt_client a, b;
1958 enum intel_engine_id id;
1962 * Verify that we can disable preemption for an individual request
1963 * that may be being observed and does not want to be interrupted.
1966 if (preempt_client_init(gt, &a))
1968 if (preempt_client_init(gt, &b))
1970 b.ctx->sched.priority = I915_PRIORITY_MAX;
1972 for_each_engine(engine, gt, id) {
1973 struct i915_request *rq_a, *rq_b;
1975 if (!intel_engine_has_preemption(engine))
1978 engine->execlists.preempt_hang.count = 0;
1980 rq_a = spinner_create_request(&a.spin,
1984 err = PTR_ERR(rq_a);
1988 /* Low priority client, but unpreemptable! */
1989 __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
1991 i915_request_add(rq_a);
1992 if (!igt_wait_for_spinner(&a.spin, rq_a)) {
1993 pr_err("First client failed to start\n");
1997 rq_b = spinner_create_request(&b.spin,
2001 err = PTR_ERR(rq_b);
2005 i915_request_add(rq_b);
2007 /* B is much more important than A! (But A is unpreemptable.) */
2008 GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
2010 /* Wait long enough for preemption and timeslicing */
2011 if (igt_wait_for_spinner(&b.spin, rq_b)) {
2012 pr_err("Second client started too early!\n");
2016 igt_spinner_end(&a.spin);
2018 if (!igt_wait_for_spinner(&b.spin, rq_b)) {
2019 pr_err("Second client failed to start\n");
2023 igt_spinner_end(&b.spin);
2025 if (engine->execlists.preempt_hang.count) {
2026 pr_err("Preemption recorded x%d; should have been suppressed!\n",
2027 engine->execlists.preempt_hang.count);
2032 if (igt_flush_test(gt->i915))
2038 preempt_client_fini(&b);
2040 preempt_client_fini(&a);
2044 igt_spinner_end(&b.spin);
2045 igt_spinner_end(&a.spin);
2046 intel_gt_set_wedged(gt);
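/*
 * State shared by the __cancel_*() phases below: the engine under test plus
 * two preempt clients.
 */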
2051 struct live_preempt_cancel {
2052 struct intel_engine_cs *engine;
2053 struct preempt_client a, b;
2056 static int __cancel_active0(struct live_preempt_cancel *arg)
2058 struct i915_request *rq;
2059 struct igt_live_test t;
2062 /* Preempt cancel of ELSP0 */
2063 GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
2064 if (igt_live_test_begin(&t, arg->engine->i915,
2065 __func__, arg->engine->name))
2068 rq = spinner_create_request(&arg->a.spin,
2069 arg->a.ctx, arg->engine,
2074 clear_bit(CONTEXT_BANNED, &rq->context->flags);
2075 i915_request_get(rq);
2076 i915_request_add(rq);
2077 if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
2082 intel_context_ban(rq->context, rq);
2083 err = intel_engine_pulse(arg->engine);
2087 err = wait_for_reset(arg->engine, rq, HZ / 2);
2089 pr_err("Cancelled inflight0 request did not reset\n");
2094 i915_request_put(rq);
2095 if (igt_live_test_end(&t))
2100 static int __cancel_active1(struct live_preempt_cancel *arg)
2102 struct i915_request *rq[2] = {};
2103 struct igt_live_test t;
2106 /* Preempt cancel of ELSP1 */
2107 GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
2108 if (igt_live_test_begin(&t, arg->engine->i915,
2109 __func__, arg->engine->name))
2112 rq[0] = spinner_create_request(&arg->a.spin,
2113 arg->a.ctx, arg->engine,
2114 MI_NOOP); /* no preemption */
2116 return PTR_ERR(rq[0]);
2118 clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
2119 i915_request_get(rq[0]);
2120 i915_request_add(rq[0]);
2121 if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
2126 rq[1] = spinner_create_request(&arg->b.spin,
2127 arg->b.ctx, arg->engine,
2129 if (IS_ERR(rq[1])) {
2130 err = PTR_ERR(rq[1]);
2134 clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
2135 i915_request_get(rq[1]);
2136 err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
2137 i915_request_add(rq[1]);
2141 intel_context_ban(rq[1]->context, rq[1]);
2142 err = intel_engine_pulse(arg->engine);
2146 igt_spinner_end(&arg->a.spin);
2147 err = wait_for_reset(arg->engine, rq[1], HZ / 2);
2151 if (rq[0]->fence.error != 0) {
2152 pr_err("Normal inflight0 request did not complete\n");
2157 if (rq[1]->fence.error != -EIO) {
2158 pr_err("Cancelled inflight1 request did not report -EIO\n");
2164 i915_request_put(rq[1]);
2165 i915_request_put(rq[0]);
2166 if (igt_live_test_end(&t))
2171 static int __cancel_queued(struct live_preempt_cancel *arg)
2173 struct i915_request *rq[3] = {};
2174 struct igt_live_test t;
2177 /* Full ELSP and one in the wings */
2178 GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
2179 if (igt_live_test_begin(&t, arg->engine->i915,
2180 __func__, arg->engine->name))
2183 rq[0] = spinner_create_request(&arg->a.spin,
2184 arg->a.ctx, arg->engine,
2187 return PTR_ERR(rq[0]);
2189 clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
2190 i915_request_get(rq[0]);
2191 i915_request_add(rq[0]);
2192 if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
2197 rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
2198 if (IS_ERR(rq[1])) {
2199 err = PTR_ERR(rq[1]);
2203 clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
2204 i915_request_get(rq[1]);
2205 err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
2206 i915_request_add(rq[1]);
2210 rq[2] = spinner_create_request(&arg->b.spin,
2211 arg->a.ctx, arg->engine,
2213 if (IS_ERR(rq[2])) {
2214 err = PTR_ERR(rq[2]);
2218 i915_request_get(rq[2]);
2219 err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
2220 i915_request_add(rq[2]);
2224 intel_context_ban(rq[2]->context, rq[2]);
2225 err = intel_engine_pulse(arg->engine);
2229 err = wait_for_reset(arg->engine, rq[2], HZ / 2);
2233 if (rq[0]->fence.error != -EIO) {
2234 pr_err("Cancelled inflight0 request did not report -EIO\n");
2240 * The behaviour differs depending on whether semaphores are available:
2241 * with semaphores, the subsequent request is already on the hardware and
2242 * is not cancelled; without them, it is held in the driver and cancelled.
2244 if (intel_engine_has_semaphores(rq[1]->engine) &&
2245 rq[1]->fence.error != 0) {
2246 pr_err("Normal inflight1 request did not complete\n");
2251 if (rq[2]->fence.error != -EIO) {
2252 pr_err("Cancelled queued request did not report -EIO\n");
2258 i915_request_put(rq[2]);
2259 i915_request_put(rq[1]);
2260 i915_request_put(rq[0]);
2261 if (igt_live_test_end(&t))
2266 static int __cancel_hostile(struct live_preempt_cancel *arg)
2268 struct i915_request *rq;
2271 /* Preempt cancel non-preemptible spinner in ELSP0 */
2272 if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
2275 if (!intel_has_reset_engine(arg->engine->gt))
2278 GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
2279 rq = spinner_create_request(&arg->a.spin,
2280 arg->a.ctx, arg->engine,
2281 MI_NOOP); /* preemption disabled */
2285 clear_bit(CONTEXT_BANNED, &rq->context->flags);
2286 i915_request_get(rq);
2287 i915_request_add(rq);
2288 if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
2293 intel_context_ban(rq->context, rq);
2294 err = intel_engine_pulse(arg->engine); /* force reset */
2298 err = wait_for_reset(arg->engine, rq, HZ / 2);
2300 pr_err("Cancelled inflight0 request did not reset\n");
2305 i915_request_put(rq);
2306 if (igt_flush_test(arg->engine->i915))
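/*
 * Selftest error injection: rig engine->reset_timeout so that the forced
 * preemption reset in __cancel_fail() fails, requiring the heartbeat to
 * escalate and reset the device instead; cancel_reset_timeout() clears the
 * injection again afterwards.
 */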
2311 static void force_reset_timeout(struct intel_engine_cs *engine)
2313 engine->reset_timeout.probability = 999;
2314 atomic_set(&engine->reset_timeout.times, -1);
2317 static void cancel_reset_timeout(struct intel_engine_cs *engine)
2319 memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout));
2322 static int __cancel_fail(struct live_preempt_cancel *arg)
2324 struct intel_engine_cs *engine = arg->engine;
2325 struct i915_request *rq;
2328 if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
2331 if (!intel_has_reset_engine(engine->gt))
2334 GEM_TRACE("%s(%s)\n", __func__, engine->name);
2335 rq = spinner_create_request(&arg->a.spin,
2337 MI_NOOP); /* preemption disabled */
2341 clear_bit(CONTEXT_BANNED, &rq->context->flags);
2342 i915_request_get(rq);
2343 i915_request_add(rq);
2344 if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
2349 intel_context_set_banned(rq->context);
2351 err = intel_engine_pulse(engine);
2355 force_reset_timeout(engine);
2357 /* force preempt reset [failure] */
2358 while (!engine->execlists.pending[0])
2359 intel_engine_flush_submission(engine);
2360 del_timer_sync(&engine->execlists.preempt);
2361 intel_engine_flush_submission(engine);
2363 cancel_reset_timeout(engine);
2365 /* after failure, require heartbeats to reset device */
2366 intel_engine_set_heartbeat(engine, 1);
2367 err = wait_for_reset(engine, rq, HZ / 2);
2368 intel_engine_set_heartbeat(engine,
2369 engine->defaults.heartbeat_interval_ms);
2371 pr_err("Cancelled inflight0 request did not reset\n");
2376 i915_request_put(rq);
2377 if (igt_flush_test(engine->i915))
2382 static int live_preempt_cancel(void *arg)
2384 struct intel_gt *gt = arg;
2385 struct live_preempt_cancel data;
2386 enum intel_engine_id id;
2390 * To cancel an inflight context, we need to first remove it from the
2391 * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
2394 if (preempt_client_init(gt, &data.a))
2396 if (preempt_client_init(gt, &data.b))
2399 for_each_engine(data.engine, gt, id) {
2400 if (!intel_engine_has_preemption(data.engine))
2403 err = __cancel_active0(&data);
2407 err = __cancel_active1(&data);
2411 err = __cancel_queued(&data);
2415 err = __cancel_hostile(&data);
2419 err = __cancel_fail(&data);
2426 preempt_client_fini(&data.b);
2428 preempt_client_fini(&data.a);
2433 igt_spinner_end(&data.b.spin);
2434 igt_spinner_end(&data.a.spin);
2435 intel_gt_set_wedged(gt);
2439 static int live_suppress_self_preempt(void *arg)
2441 struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
2442 struct intel_gt *gt = arg;
2443 struct intel_engine_cs *engine;
2444 struct preempt_client a, b;
2445 enum intel_engine_id id;
2449 * Verify that if a preemption request does not cause a change in
2450 * the current execution order, the preempt-to-idle injection is
2451 * skipped and that we do not accidentally apply it after the CS
2455 if (intel_uc_uses_guc_submission(>->uc))
2456 return 0; /* presume black box */
2458 if (intel_vgpu_active(gt->i915))
2459 return 0; /* GVT forces single port & request submission */
2461 if (preempt_client_init(gt, &a))
2463 if (preempt_client_init(gt, &b))
2466 for_each_engine(engine, gt, id) {
2467 struct i915_request *rq_a, *rq_b;
2470 if (!intel_engine_has_preemption(engine))
2473 if (igt_flush_test(gt->i915))
2476 st_engine_heartbeat_disable(engine);
2477 engine->execlists.preempt_hang.count = 0;
2479 rq_a = spinner_create_request(&a.spin,
2483 err = PTR_ERR(rq_a);
2484 st_engine_heartbeat_enable(engine);
2488 i915_request_add(rq_a);
2489 if (!igt_wait_for_spinner(&a.spin, rq_a)) {
2490 pr_err("First client failed to start\n");
2491 st_engine_heartbeat_enable(engine);
2495 /* Keep postponing the timer to avoid premature slicing */
2496 mod_timer(&engine->execlists.timer, jiffies + HZ);
2497 for (depth = 0; depth < 8; depth++) {
2498 rq_b = spinner_create_request(&b.spin,
2502 err = PTR_ERR(rq_b);
2503 st_engine_heartbeat_enable(engine);
2506 i915_request_add(rq_b);
2508 GEM_BUG_ON(i915_request_completed(rq_a));
2509 engine->sched_engine->schedule(rq_a, &attr);
2510 igt_spinner_end(&a.spin);
2512 if (!igt_wait_for_spinner(&b.spin, rq_b)) {
2513 pr_err("Second client failed to start\n");
2514 st_engine_heartbeat_enable(engine);
2521 igt_spinner_end(&a.spin);
2523 if (engine->execlists.preempt_hang.count) {
2524 pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n",
2526 engine->execlists.preempt_hang.count,
2528 st_engine_heartbeat_enable(engine);
2533 st_engine_heartbeat_enable(engine);
2534 if (igt_flush_test(gt->i915))
2540 preempt_client_fini(&b);
2542 preempt_client_fini(&a);
2546 igt_spinner_end(&b.spin);
2547 igt_spinner_end(&a.spin);
2548 intel_gt_set_wedged(gt);
2553 static int live_chain_preempt(void *arg)
2555 struct intel_gt *gt = arg;
2556 struct intel_engine_cs *engine;
2557 struct preempt_client hi, lo;
2558 enum intel_engine_id id;
2562 * Build a chain AB...BA between two contexts (A, B) and request
2563 * preemption of the last request. It should then complete before
2564 * the previously submitted spinner in B.
2567 if (preempt_client_init(gt, &hi))
2570 if (preempt_client_init(gt, &lo))
2573 for_each_engine(engine, gt, id) {
2574 struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
2575 struct igt_live_test t;
2576 struct i915_request *rq;
2577 int ring_size, count, i;
2579 if (!intel_engine_has_preemption(engine))
2582 rq = spinner_create_request(&lo.spin,
2588 i915_request_get(rq);
2589 i915_request_add(rq);
2591 ring_size = rq->wa_tail - rq->head;
2593 ring_size += rq->ring->size;
2594 ring_size = rq->ring->size / ring_size;
2595 pr_debug("%s(%s): Using maximum of %d requests\n",
2596 __func__, engine->name, ring_size);
2598 igt_spinner_end(&lo.spin);
2599 if (i915_request_wait(rq, 0, HZ / 2) < 0) {
2600 pr_err("Timed out waiting to flush %s\n", engine->name);
2601 i915_request_put(rq);
2604 i915_request_put(rq);
2606 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
2611 for_each_prime_number_from(count, 1, ring_size) {
2612 rq = spinner_create_request(&hi.spin,
2617 i915_request_add(rq);
2618 if (!igt_wait_for_spinner(&hi.spin, rq))
2621 rq = spinner_create_request(&lo.spin,
2626 i915_request_add(rq);
2628 for (i = 0; i < count; i++) {
2629 rq = igt_request_alloc(lo.ctx, engine);
2632 i915_request_add(rq);
2635 rq = igt_request_alloc(hi.ctx, engine);
2639 i915_request_get(rq);
2640 i915_request_add(rq);
2641 engine->sched_engine->schedule(rq, &attr);
2643 igt_spinner_end(&hi.spin);
2644 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
2645 struct drm_printer p =
2646 drm_info_printer(gt->i915->drm.dev);
2648 pr_err("Failed to preempt over chain of %d\n",
2650 intel_engine_dump(engine, &p,
2651 "%s\n", engine->name);
2652 i915_request_put(rq);
2655 igt_spinner_end(&lo.spin);
2656 i915_request_put(rq);
2658 rq = igt_request_alloc(lo.ctx, engine);
2662 i915_request_get(rq);
2663 i915_request_add(rq);
2665 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
2666 struct drm_printer p =
2667 drm_info_printer(gt->i915->drm.dev);
2669 pr_err("Failed to flush low priority chain of %d requests\n",
2671 intel_engine_dump(engine, &p,
2672 "%s\n", engine->name);
2674 i915_request_put(rq);
2677 i915_request_put(rq);
2680 if (igt_live_test_end(&t)) {
2688 preempt_client_fini(&lo);
2690 preempt_client_fini(&hi);
2694 igt_spinner_end(&hi.spin);
2695 igt_spinner_end(&lo.spin);
2696 intel_gt_set_wedged(gt);
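/*
 * Each gang member spins until the first dword of its own batch is rewritten
 * to zero; once released, it zeroes the start of the previous (lower
 * priority) member's batch, terminating that spinner in turn. The requests
 * are threaded together via rq->mock.link purely for the selftest's own
 * bookkeeping when unwinding the gang.
 */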
2701 static int create_gang(struct intel_engine_cs *engine,
2702 struct i915_request **prev)
2704 struct drm_i915_gem_object *obj;
2705 struct intel_context *ce;
2706 struct i915_request *rq;
2707 struct i915_vma *vma;
2711 ce = intel_context_create(engine);
2715 obj = i915_gem_object_create_internal(engine->i915, 4096);
2721 vma = i915_vma_instance(obj, ce->vm, NULL);
2727 err = i915_vma_pin(vma, 0, 0, PIN_USER);
2731 cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
2737 /* Semaphore target: spin until zero */
2738 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
2740 *cs++ = MI_SEMAPHORE_WAIT |
2742 MI_SEMAPHORE_SAD_EQ_SDD;
2744 *cs++ = lower_32_bits(i915_vma_offset(vma));
2745 *cs++ = upper_32_bits(i915_vma_offset(vma));
2748 u64 offset = i915_vma_offset((*prev)->batch);
2750 /* Terminate the spinner in the next lower priority batch. */
2751 *cs++ = MI_STORE_DWORD_IMM_GEN4;
2752 *cs++ = lower_32_bits(offset);
2753 *cs++ = upper_32_bits(offset);
2757 *cs++ = MI_BATCH_BUFFER_END;
2758 i915_gem_object_flush_map(obj);
2759 i915_gem_object_unpin_map(obj);
2761 rq = intel_context_create_request(ce);
2767 rq->batch = i915_vma_get(vma);
2768 i915_request_get(rq);
2770 err = igt_vma_move_to_active_unlocked(vma, rq, 0);
2772 err = rq->engine->emit_bb_start(rq,
2773 i915_vma_offset(vma),
2775 i915_request_add(rq);
2779 i915_gem_object_put(obj);
2780 intel_context_put(ce);
2782 rq->mock.link.next = &(*prev)->mock.link;
2787 i915_vma_put(rq->batch);
2788 i915_request_put(rq);
2790 i915_gem_object_put(obj);
2792 intel_context_put(ce);
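/*
 * Helper for live_preempt_ring(): give both contexts a ring of @ring_sz
 * bytes, start a spinner on ce[0] and pad its ring with roughly @queue_sz
 * bytes of nop requests, then check that a maximum priority request on ce[1]
 * is still submitted, which requires rolling back the unexecuted tail of
 * ce[0]'s ring.
 */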
2796 static int __live_preempt_ring(struct intel_engine_cs *engine,
2797 struct igt_spinner *spin,
2798 int queue_sz, int ring_sz)
2800 struct intel_context *ce[2] = {};
2801 struct i915_request *rq;
2802 struct igt_live_test t;
2806 if (igt_live_test_begin(&t, engine->i915, __func__, engine->name))
2809 for (n = 0; n < ARRAY_SIZE(ce); n++) {
2810 struct intel_context *tmp;
2812 tmp = intel_context_create(engine);
2818 tmp->ring_size = ring_sz;
2820 err = intel_context_pin(tmp);
2822 intel_context_put(tmp);
2826 memset32(tmp->ring->vaddr,
2827 0xdeadbeef, /* trigger a hang if executed */
2828 tmp->ring->vma->size / sizeof(u32));
2833 rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
2839 i915_request_get(rq);
2840 rq->sched.attr.priority = I915_PRIORITY_BARRIER;
2841 i915_request_add(rq);
2843 if (!igt_wait_for_spinner(spin, rq)) {
2844 intel_gt_set_wedged(engine->gt);
2845 i915_request_put(rq);
2850 /* Fill the ring, until we cause a wrap */
2852 while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
2853 struct i915_request *tmp;
2855 tmp = intel_context_create_request(ce[0]);
2858 i915_request_put(rq);
2862 i915_request_add(tmp);
2863 intel_engine_flush_submission(engine);
2866 intel_engine_flush_submission(engine);
2867 pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
2868 engine->name, queue_sz, n,
2873 i915_request_put(rq);
2875 /* Create a second request to preempt the first ring */
2876 rq = intel_context_create_request(ce[1]);
2882 rq->sched.attr.priority = I915_PRIORITY_BARRIER;
2883 i915_request_get(rq);
2884 i915_request_add(rq);
2886 err = wait_for_submit(engine, rq, HZ / 2);
2887 i915_request_put(rq);
2889 pr_err("%s: preemption request was not submitted\n",
2894 pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
2896 ce[0]->ring->tail, ce[0]->ring->emit,
2897 ce[1]->ring->tail, ce[1]->ring->emit);
2900 intel_engine_flush_submission(engine);
2901 igt_spinner_end(spin);
2902 for (n = 0; n < ARRAY_SIZE(ce); n++) {
2903 if (IS_ERR_OR_NULL(ce[n]))
2906 intel_context_unpin(ce[n]);
2907 intel_context_put(ce[n]);
2909 if (igt_live_test_end(&t))
2914 static int live_preempt_ring(void *arg)
2916 struct intel_gt *gt = arg;
2917 struct intel_engine_cs *engine;
2918 struct igt_spinner spin;
2919 enum intel_engine_id id;
2923 * Check that we roll back large chunks of a ring in order to do a
2924 * preemption event. Similar to live_unlite_ring, but looking at
2925 * ring size rather than the impact of intel_ring_direction().
2928 if (igt_spinner_init(&spin, gt))
2931 for_each_engine(engine, gt, id) {
2934 if (!intel_engine_has_preemption(engine))
2937 if (!intel_engine_can_store_dword(engine))
2940 st_engine_heartbeat_disable(engine);
2942 for (n = 0; n <= 3; n++) {
2943 err = __live_preempt_ring(engine, &spin,
2944 n * SZ_4K / 4, SZ_4K);
2949 st_engine_heartbeat_enable(engine);
2954 igt_spinner_fini(&spin);
2958 static int live_preempt_gang(void *arg)
2960 struct intel_gt *gt = arg;
2961 struct intel_engine_cs *engine;
2962 enum intel_engine_id id;
2965 * Build as long a chain of preempters as we can, with each
2966 * request higher priority than the last. Once we are ready, we release
2967 * the last batch which then percolates down the chain, each releasing
2968 * the next oldest in turn. The intent is to simply push as hard as we
2969 * can with the number of preemptions, trying to exceed narrow HW
2970 * limits. At a minimum, we insist that we can sort all the user
2971 * high priority levels into execution order.
2974 for_each_engine(engine, gt, id) {
2975 struct i915_request *rq = NULL;
2976 struct igt_live_test t;
2977 IGT_TIMEOUT(end_time);
2982 if (!intel_engine_has_preemption(engine))
2985 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name))
2989 struct i915_sched_attr attr = { .priority = prio++ };
2991 err = create_gang(engine, &rq);
2995 /* Submit each spinner at increasing priority */
2996 engine->sched_engine->schedule(rq, &attr);
2997 } while (prio <= I915_PRIORITY_MAX &&
2998 !__igt_timeout(end_time, NULL));
2999 pr_debug("%s: Preempt chain of %d requests\n",
3000 engine->name, prio);
3003 * Such that the last spinner is the highest priority and
3004 * should execute first. When that spinner completes,
3005 * it will terminate the next lowest spinner until there
3006 * are no more spinners and the gang is complete.
3008 cs = i915_gem_object_pin_map_unlocked(rq->batch->obj, I915_MAP_WC);
3011 i915_gem_object_unpin_map(rq->batch->obj);
3014 intel_gt_set_wedged(gt);
3017 while (rq) { /* wait for each rq from highest to lowest prio */
3018 struct i915_request *n = list_next_entry(rq, mock.link);
3020 if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
3021 struct drm_printer p =
3022 drm_info_printer(engine->i915->drm.dev);
3024 pr_err("Failed to flush chain of %d requests, at %d\n",
3026 intel_engine_dump(engine, &p,
3027 "%s\n", engine->name);
3032 i915_vma_put(rq->batch);
3033 i915_request_put(rq);
3037 if (igt_live_test_end(&t))
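/*
 * create_gpr_user() builds a user batch that increments each GPR with
 * MI_MATH, stores the result into @result at @offset with MI_SRM, and then
 * waits on a semaphore in @result. If preemption ever restarted the batch
 * from the beginning rather than resuming mid-batch, the stored counters
 * would exceed 1.
 */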
3046 static struct i915_vma *
3047 create_gpr_user(struct intel_engine_cs *engine,
3048 struct i915_vma *result,
3049 unsigned int offset)
3051 struct drm_i915_gem_object *obj;
3052 struct i915_vma *vma;
3057 obj = i915_gem_object_create_internal(engine->i915, 4096);
3059 return ERR_CAST(obj);
3061 vma = i915_vma_instance(obj, result->vm, NULL);
3063 i915_gem_object_put(obj);
3067 err = i915_vma_pin(vma, 0, 0, PIN_USER);
3070 return ERR_PTR(err);
3073 cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
3076 return ERR_CAST(cs);
3079 /* All GPRs are clear for new contexts. We use GPR(0) as a constant */
3080 *cs++ = MI_LOAD_REGISTER_IMM(1);
3081 *cs++ = CS_GPR(engine, 0);
3084 for (i = 1; i < NUM_GPR; i++) {
3090 * As we read and write into the context saved GPR[i], if
3091 * we restart this batch buffer from an earlier point, we
3092 * will repeat the increment and store a value > 1.
3095 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i));
3096 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0));
3097 *cs++ = MI_MATH_ADD;
3098 *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
3100 addr = i915_vma_offset(result) + offset + i * sizeof(*cs);
3101 *cs++ = MI_STORE_REGISTER_MEM_GEN8;
3102 *cs++ = CS_GPR(engine, 2 * i);
3103 *cs++ = lower_32_bits(addr);
3104 *cs++ = upper_32_bits(addr);
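		/*
		 * Arbitration point: poll the shared result dword until
		 * preempt_user() has advanced it far enough; this is where
		 * the forced preemptions are expected to land.
		 */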
3106 *cs++ = MI_SEMAPHORE_WAIT |
3108 MI_SEMAPHORE_SAD_GTE_SDD;
3110 *cs++ = lower_32_bits(i915_vma_offset(result));
3111 *cs++ = upper_32_bits(i915_vma_offset(result));
3114 *cs++ = MI_BATCH_BUFFER_END;
3115 i915_gem_object_flush_map(obj);
3116 i915_gem_object_unpin_map(obj);
3121 static struct i915_vma *create_global(struct intel_gt *gt, size_t sz)
3123 struct drm_i915_gem_object *obj;
3124 struct i915_vma *vma;
3127 obj = i915_gem_object_create_internal(gt->i915, sz);
3129 return ERR_CAST(obj);
3131 vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
3133 i915_gem_object_put(obj);
3137 err = i915_ggtt_pin(vma, NULL, 0, 0);
3140 return ERR_PTR(err);
3146 static struct i915_request *
3147 create_gpr_client(struct intel_engine_cs *engine,
3148 struct i915_vma *global,
3149 unsigned int offset)
3151 struct i915_vma *batch, *vma;
3152 struct intel_context *ce;
3153 struct i915_request *rq;
3156 ce = intel_context_create(engine);
3158 return ERR_CAST(ce);
3160 vma = i915_vma_instance(global->obj, ce->vm, NULL);
3166 err = i915_vma_pin(vma, 0, 0, PIN_USER);
3170 batch = create_gpr_user(engine, vma, offset);
3171 if (IS_ERR(batch)) {
3172 err = PTR_ERR(batch);
3176 rq = intel_context_create_request(ce);
3182 err = igt_vma_move_to_active_unlocked(vma, rq, 0);
3184 i915_vma_lock(batch);
3186 err = i915_vma_move_to_active(batch, rq, 0);
3188 err = rq->engine->emit_bb_start(rq,
3189 i915_vma_offset(batch),
3191 i915_vma_unlock(batch);
3192 i915_vma_unpin(batch);
3195 i915_request_get(rq);
3196 i915_request_add(rq);
3199 i915_vma_put(batch);
3201 i915_vma_unpin(vma);
3203 intel_context_put(ce);
3204 return err ? ERR_PTR(err) : rq;
3207 static int preempt_user(struct intel_engine_cs *engine,
3208 struct i915_vma *global,
3211 struct i915_sched_attr attr = {
3212 .priority = I915_PRIORITY_MAX
3214 struct i915_request *rq;
3218 rq = intel_engine_create_kernel_request(engine);
3222 cs = intel_ring_begin(rq, 4);
3224 i915_request_add(rq);
3228 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
3229 *cs++ = i915_ggtt_offset(global);
3233 intel_ring_advance(rq, cs);
3235 i915_request_get(rq);
3236 i915_request_add(rq);
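	/*
	 * At maximum priority this kernel request preempts the user
	 * clients; the store above advances the semaphore their batches
	 * are polling on.
	 */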
3238 engine->sched_engine->schedule(rq, &attr);
3240 if (i915_request_wait(rq, 0, HZ / 2) < 0)
3242 i915_request_put(rq);
3247 static int live_preempt_user(void *arg)
3249 struct intel_gt *gt = arg;
3250 struct intel_engine_cs *engine;
3251 struct i915_vma *global;
3252 enum intel_engine_id id;
3257 * In our other tests, we look at preemption in carefully
3258 * controlled conditions in the ringbuffer. Since most of the
3259 * time is spent in user batches, most of our preemptions naturally
3260 * occur there. We want to verify that when we preempt inside a batch
3261 * we continue on from the current instruction and do not roll back
3262 * to the start, or another earlier arbitration point.
3264 * To verify this, we create a batch which is a mixture of
3265 * MI_MATH (gpr++) MI_SRM (gpr) and preemption points. Then with
3266 * a few preempting contexts thrown into the mix, we look for any
3267 * repeated instructions (which show up as incorrect values).
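 *
 * A max-priority kernel request (preempt_user) releases the clients'
 * shared semaphore one step at a time, so each step of the client
 * batches runs after a freshly forced preemption.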
3270 global = create_global(gt, 4096);
3272 return PTR_ERR(global);
3274 result = i915_gem_object_pin_map_unlocked(global->obj, I915_MAP_WC);
3275 if (IS_ERR(result)) {
3276 i915_vma_unpin_and_release(&global, 0);
3277 return PTR_ERR(result);
3280 for_each_engine(engine, gt, id) {
3281 struct i915_request *client[3] = {};
3282 struct igt_live_test t;
3285 if (!intel_engine_has_preemption(engine))
3288 if (GRAPHICS_VER(gt->i915) == 8 && engine->class != RENDER_CLASS)
3289 continue; /* we need per-context GPR */
3291 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
3296 memset(result, 0, 4096);
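		/*
		 * Each client owns a NUM_GPR-dword window in the result
		 * page; dword 0 doubles as the semaphore advanced by
		 * preempt_user().
		 */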
3298 for (i = 0; i < ARRAY_SIZE(client); i++) {
3299 struct i915_request *rq;
3301 rq = create_gpr_client(engine, global,
3302 NUM_GPR * i * sizeof(u32));
3311 /* Continuously preempt the set of 3 running contexts */
3312 for (i = 1; i <= NUM_GPR; i++) {
3313 err = preempt_user(engine, global, i);
3318 if (READ_ONCE(result[0]) != NUM_GPR) {
3319 pr_err("%s: Failed to release semaphore\n",
3325 for (i = 0; i < ARRAY_SIZE(client); i++) {
3328 if (i915_request_wait(client[i], 0, HZ / 2) < 0) {
3333 for (gpr = 1; gpr < NUM_GPR; gpr++) {
3334 if (result[NUM_GPR * i + gpr] != 1) {
3335 pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n",
3337 i, gpr, result[NUM_GPR * i + gpr]);
3345 for (i = 0; i < ARRAY_SIZE(client); i++) {
3349 i915_request_put(client[i]);
3352 /* Flush the semaphores on error */
3353 smp_store_mb(result[0], -1);
3354 if (igt_live_test_end(&t))
3360 i915_vma_unpin_and_release(&global, I915_VMA_RELEASE_MAP);
3364 static int live_preempt_timeout(void *arg)
3366 struct intel_gt *gt = arg;
3367 struct i915_gem_context *ctx_hi, *ctx_lo;
3368 struct igt_spinner spin_lo;
3369 struct intel_engine_cs *engine;
3370 enum intel_engine_id id;
3374 * Check that we force preemption to occur by cancelling the previous
3375 * context if it refuses to yield the GPU.
3377 if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
3380 if (!intel_has_reset_engine(gt))
3383 ctx_hi = kernel_context(gt->i915, NULL);
3386 ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
3388 ctx_lo = kernel_context(gt->i915, NULL);
3391 ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
3393 if (igt_spinner_init(&spin_lo, gt))
3396 for_each_engine(engine, gt, id) {
3397 unsigned long saved_timeout;
3398 struct i915_request *rq;
3400 if (!intel_engine_has_preemption(engine))
3403 rq = spinner_create_request(&spin_lo, ctx_lo, engine,
3404 MI_NOOP); /* preemption disabled */
3410 i915_request_add(rq);
3411 if (!igt_wait_for_spinner(&spin_lo, rq)) {
3412 intel_gt_set_wedged(gt);
3417 rq = igt_request_alloc(ctx_hi, engine);
3419 igt_spinner_end(&spin_lo);
3424 /* Flush the previous CS ack before changing timeouts */
3425 while (READ_ONCE(engine->execlists.pending[0]))
3428 saved_timeout = engine->props.preempt_timeout_ms;
3429 engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffy */
3431 i915_request_get(rq);
3432 i915_request_add(rq);
3434 intel_engine_flush_submission(engine);
3435 engine->props.preempt_timeout_ms = saved_timeout;
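		/*
		 * The low priority spinner never reaches an arbitration
		 * point (MI_NOOP above), so only the 1ms preempt timeout,
		 * and the engine reset it triggers, can make way for this
		 * request.
		 */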
3437 if (i915_request_wait(rq, 0, HZ / 10) < 0) {
3438 intel_gt_set_wedged(gt);
3439 i915_request_put(rq);
3444 igt_spinner_end(&spin_lo);
3445 i915_request_put(rq);
3450 igt_spinner_fini(&spin_lo);
3452 kernel_context_close(ctx_lo);
3454 kernel_context_close(ctx_hi);
3458 static int random_range(struct rnd_state *rnd, int min, int max)
3460 return i915_prandom_u32_max_state(max - min, rnd) + min;
3463 static int random_priority(struct rnd_state *rnd)
3465 return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
3468 struct preempt_smoke {
3469 struct intel_gt *gt;
3470 struct kthread_work work;
3471 struct i915_gem_context **contexts;
3472 struct intel_engine_cs *engine;
3473 struct drm_i915_gem_object *batch;
3474 unsigned int ncontext;
3475 struct rnd_state prng;
3476 unsigned long count;
3480 static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
3482 return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
3486 static int smoke_submit(struct preempt_smoke *smoke,
3487 struct i915_gem_context *ctx, int prio,
3488 struct drm_i915_gem_object *batch)
3490 struct i915_request *rq;
3491 struct i915_vma *vma = NULL;
3495 struct i915_address_space *vm;
3497 vm = i915_gem_context_get_eb_vm(ctx);
3498 vma = i915_vma_instance(batch, vm, NULL);
3501 return PTR_ERR(vma);
3503 err = i915_vma_pin(vma, 0, 0, PIN_USER);
3508 ctx->sched.priority = prio;
3510 rq = igt_request_alloc(ctx, smoke->engine);
3517 err = igt_vma_move_to_active_unlocked(vma, rq, 0);
3519 err = rq->engine->emit_bb_start(rq,
3520 i915_vma_offset(vma),
3524 i915_request_add(rq);
3528 i915_vma_unpin(vma);
3533 static void smoke_crescendo_work(struct kthread_work *work)
3535 struct preempt_smoke *smoke = container_of(work, typeof(*smoke), work);
3536 IGT_TIMEOUT(end_time);
3537 unsigned long count;
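	/*
	 * Ramp the priority with each submission so that newer requests
	 * keep preempting older ones on this engine, until we run out of
	 * contexts or hit the selftest timeout.
	 */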
3541 struct i915_gem_context *ctx = smoke_context(smoke);
3543 smoke->result = smoke_submit(smoke, ctx,
3544 count % I915_PRIORITY_MAX,
3548 } while (!smoke->result && count < smoke->ncontext &&
3549 !__igt_timeout(end_time, NULL));
3551 smoke->count = count;
3554 static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
3555 #define BATCH BIT(0)
3557 struct kthread_worker *worker[I915_NUM_ENGINES] = {};
3558 struct preempt_smoke *arg;
3559 struct intel_engine_cs *engine;
3560 enum intel_engine_id id;
3561 unsigned long count;
3564 arg = kmalloc_array(I915_NUM_ENGINES, sizeof(*arg), GFP_KERNEL);
3568 memset(arg, 0, I915_NUM_ENGINES * sizeof(*arg));
3570 for_each_engine(engine, smoke->gt, id) {
3572 arg[id].engine = engine;
3573 if (!(flags & BATCH))
3574 arg[id].batch = NULL;
3577 worker[id] = kthread_create_worker(0, "igt/smoke:%d", id);
3578 if (IS_ERR(worker[id])) {
3579 err = PTR_ERR(worker[id]);
3583 kthread_init_work(&arg[id].work, smoke_crescendo_work);
3584 kthread_queue_work(worker[id], &arg[id].work);
3588 for_each_engine(engine, smoke->gt, id) {
3589 if (IS_ERR_OR_NULL(worker[id]))
3592 kthread_flush_work(&arg[id].work);
3593 if (arg[id].result && !err)
3594 err = arg[id].result;
3596 count += arg[id].count;
3598 kthread_destroy_worker(worker[id]);
3601 pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
3602 count, flags, smoke->gt->info.num_engines, smoke->ncontext);
3608 static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
3610 enum intel_engine_id id;
3611 IGT_TIMEOUT(end_time);
3612 unsigned long count;
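	/*
	 * Pick a random context and a random priority for every engine on
	 * each pass, exercising arbitrary preemption orderings rather than
	 * a monotonic ramp.
	 */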
3616 for_each_engine(smoke->engine, smoke->gt, id) {
3617 struct i915_gem_context *ctx = smoke_context(smoke);
3620 err = smoke_submit(smoke,
3621 ctx, random_priority(&smoke->prng),
3622 flags & BATCH ? smoke->batch : NULL);
3628 } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
3630 pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
3631 count, flags, smoke->gt->info.num_engines, smoke->ncontext);
3635 static int live_preempt_smoke(void *arg)
3637 struct preempt_smoke smoke = {
3639 .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
3642 const unsigned int phase[] = { 0, BATCH };
3643 struct igt_live_test t;
3648 smoke.contexts = kmalloc_array(smoke.ncontext,
3649 sizeof(*smoke.contexts),
3651 if (!smoke.contexts)
3655 i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE);
3656 if (IS_ERR(smoke.batch)) {
3657 err = PTR_ERR(smoke.batch);
3661 cs = i915_gem_object_pin_map_unlocked(smoke.batch, I915_MAP_WB);
3666 for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
3667 cs[n] = MI_ARB_CHECK;
3668 cs[n] = MI_BATCH_BUFFER_END;
3669 i915_gem_object_flush_map(smoke.batch);
3670 i915_gem_object_unpin_map(smoke.batch);
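	/*
	 * The smoke batch is a page of MI_ARB_CHECK instructions, so the
	 * command streamer sees an arbitration point at every dword and
	 * preemption may land anywhere inside the batch.
	 */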
3672 if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) {
3677 for (n = 0; n < smoke.ncontext; n++) {
3678 smoke.contexts[n] = kernel_context(smoke.gt->i915, NULL);
3679 if (!smoke.contexts[n])
3683 for (n = 0; n < ARRAY_SIZE(phase); n++) {
3684 err = smoke_crescendo(&smoke, phase[n]);
3688 err = smoke_random(&smoke, phase[n]);
3694 if (igt_live_test_end(&t))
3697 for (n = 0; n < smoke.ncontext; n++) {
3698 if (!smoke.contexts[n])
3700 kernel_context_close(smoke.contexts[n]);
3704 i915_gem_object_put(smoke.batch);
3706 kfree(smoke.contexts);
3711 static int nop_virtual_engine(struct intel_gt *gt,
3712 struct intel_engine_cs **siblings,
3713 unsigned int nsibling,
3716 #define CHAIN BIT(0)
3718 IGT_TIMEOUT(end_time);
3719 struct i915_request *request[16] = {};
3720 struct intel_context *ve[16];
3721 unsigned long n, prime, nc;
3722 struct igt_live_test t;
3723 ktime_t times[2] = {};
3726 GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
3728 for (n = 0; n < nctx; n++) {
3729 ve[n] = intel_engine_create_virtual(siblings, nsibling, 0);
3730 if (IS_ERR(ve[n])) {
3731 err = PTR_ERR(ve[n]);
3736 err = intel_context_pin(ve[n]);
3738 intel_context_put(ve[n]);
3744 err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name);
3748 for_each_prime_number_from(prime, 1, 8192) {
3749 times[1] = ktime_get_raw();
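		/*
		 * With CHAIN, submit all of one context's requests before
		 * moving on to the next; otherwise interleave the contexts
		 * request by request.
		 */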
3751 if (flags & CHAIN) {
3752 for (nc = 0; nc < nctx; nc++) {
3753 for (n = 0; n < prime; n++) {
3754 struct i915_request *rq;
3756 rq = i915_request_create(ve[nc]);
3763 i915_request_put(request[nc]);
3764 request[nc] = i915_request_get(rq);
3765 i915_request_add(rq);
3769 for (n = 0; n < prime; n++) {
3770 for (nc = 0; nc < nctx; nc++) {
3771 struct i915_request *rq;
3773 rq = i915_request_create(ve[nc]);
3780 i915_request_put(request[nc]);
3781 request[nc] = i915_request_get(rq);
3782 i915_request_add(rq);
3787 for (nc = 0; nc < nctx; nc++) {
3788 if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
3789 pr_err("%s(%s): wait for %llx:%lld timed out\n",
3790 __func__, ve[0]->engine->name,
3791 request[nc]->fence.context,
3792 request[nc]->fence.seqno);
3794 GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
3795 __func__, ve[0]->engine->name,
3796 request[nc]->fence.context,
3797 request[nc]->fence.seqno);
3799 intel_gt_set_wedged(gt);
3804 times[1] = ktime_sub(ktime_get_raw(), times[1]);
3806 times[0] = times[1];
3808 for (nc = 0; nc < nctx; nc++) {
3809 i915_request_put(request[nc]);
3813 if (__igt_timeout(end_time, NULL))
3817 err = igt_live_test_end(&t);
3821 pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
3822 nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
3823 prime, div64_u64(ktime_to_ns(times[1]), prime));
3826 if (igt_flush_test(gt->i915))
3829 for (nc = 0; nc < nctx; nc++) {
3830 i915_request_put(request[nc]);
3831 intel_context_unpin(ve[nc]);
3832 intel_context_put(ve[nc]);
3838 __select_siblings(struct intel_gt *gt,
3840 struct intel_engine_cs **siblings,
3841 bool (*filter)(const struct intel_engine_cs *))
3846 for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
3847 if (!gt->engine_class[class][inst])
3850 if (filter && !filter(gt->engine_class[class][inst]))
3853 siblings[n++] = gt->engine_class[class][inst];
3860 select_siblings(struct intel_gt *gt,
3862 struct intel_engine_cs **siblings)
3864 return __select_siblings(gt, class, siblings, NULL);
3867 static int live_virtual_engine(void *arg)
3869 struct intel_gt *gt = arg;
3870 struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
3871 struct intel_engine_cs *engine;
3872 enum intel_engine_id id;
3876 if (intel_uc_uses_guc_submission(&gt->uc))
3879 for_each_engine(engine, gt, id) {
3880 err = nop_virtual_engine(gt, &engine, 1, 1, 0);
3882 pr_err("Failed to wrap engine %s: err=%d\n",
3888 for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
3891 nsibling = select_siblings(gt, class, siblings);
3895 for (n = 1; n <= nsibling + 1; n++) {
3896 err = nop_virtual_engine(gt, siblings, nsibling,
3902 err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN);
3910 static int mask_virtual_engine(struct intel_gt *gt,
3911 struct intel_engine_cs **siblings,
3912 unsigned int nsibling)
3914 struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
3915 struct intel_context *ve;
3916 struct igt_live_test t;
3921 * Check that by setting the execution mask on a request, we can
3922 * restrict it to our desired engine within the virtual engine.
3925 ve = intel_engine_create_virtual(siblings, nsibling, 0);
3931 err = intel_context_pin(ve);
3935 err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
3939 for (n = 0; n < nsibling; n++) {
3940 request[n] = i915_request_create(ve);
3941 if (IS_ERR(request[n])) {
3942 err = PTR_ERR(request[n]);
3947 /* Reverse order as it's more likely to be unnatural */
3948 request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
3950 i915_request_get(request[n]);
3951 i915_request_add(request[n]);
3954 for (n = 0; n < nsibling; n++) {
3955 if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
3956 pr_err("%s(%s): wait for %llx:%lld timed out\n",
3957 __func__, ve->engine->name,
3958 request[n]->fence.context,
3959 request[n]->fence.seqno);
3961 GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
3962 __func__, ve->engine->name,
3963 request[n]->fence.context,
3964 request[n]->fence.seqno);
3966 intel_gt_set_wedged(gt);
3971 if (request[n]->engine != siblings[nsibling - n - 1]) {
3972 pr_err("Executed on wrong sibling '%s', expected '%s'\n",
3973 request[n]->engine->name,
3974 siblings[nsibling - n - 1]->name);
3980 err = igt_live_test_end(&t);
3982 if (igt_flush_test(gt->i915))
3985 for (n = 0; n < nsibling; n++)
3986 i915_request_put(request[n]);
3989 intel_context_unpin(ve);
3991 intel_context_put(ve);
3996 static int live_virtual_mask(void *arg)
3998 struct intel_gt *gt = arg;
3999 struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
4003 if (intel_uc_uses_guc_submission(&gt->uc))
4006 for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
4007 unsigned int nsibling;
4009 nsibling = select_siblings(gt, class, siblings);
4013 err = mask_virtual_engine(gt, siblings, nsibling);
4021 static int slicein_virtual_engine(struct intel_gt *gt,
4022 struct intel_engine_cs **siblings,
4023 unsigned int nsibling)
4025 const long timeout = slice_timeout(siblings[0]);
4026 struct intel_context *ce;
4027 struct i915_request *rq;
4028 struct igt_spinner spin;
4033 * Virtual requests must take part in timeslicing on the target engines.
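 *
 * Each sibling is kept busy with a preemptible spinner; an otherwise
 * empty request submitted to the virtual engine must still be granted
 * a timeslice, and hence complete, within the slice timeout.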
4036 if (igt_spinner_init(&spin, gt))
4039 for (n = 0; n < nsibling; n++) {
4040 ce = intel_context_create(siblings[n]);
4046 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
4047 intel_context_put(ce);
4053 i915_request_add(rq);
4056 ce = intel_engine_create_virtual(siblings, nsibling, 0);
4062 rq = intel_context_create_request(ce);
4063 intel_context_put(ce);
4069 i915_request_get(rq);
4070 i915_request_add(rq);
4071 if (i915_request_wait(rq, 0, timeout) < 0) {
4072 GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
4073 __func__, rq->engine->name);
4075 intel_gt_set_wedged(gt);
4078 i915_request_put(rq);
4081 igt_spinner_end(&spin);
4082 if (igt_flush_test(gt->i915))
4084 igt_spinner_fini(&spin);
4088 static int sliceout_virtual_engine(struct intel_gt *gt,
4089 struct intel_engine_cs **siblings,
4090 unsigned int nsibling)
4092 const long timeout = slice_timeout(siblings[0]);
4093 struct intel_context *ce;
4094 struct i915_request *rq;
4095 struct igt_spinner spin;
4100 * Virtual requests must allow others a fair timeslice.
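 *
 * Here the spinners are submitted through virtual engines, and a
 * request queued directly to each physical sibling must still be able
 * to slice in and complete within the slice timeout.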
4103 if (igt_spinner_init(&spin, gt))
4106 /* XXX We do not handle oversubscription and fairness with normal rq */
4107 for (n = 0; n < nsibling; n++) {
4108 ce = intel_engine_create_virtual(siblings, nsibling, 0);
4114 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
4115 intel_context_put(ce);
4121 i915_request_add(rq);
4124 for (n = 0; !err && n < nsibling; n++) {
4125 ce = intel_context_create(siblings[n]);
4131 rq = intel_context_create_request(ce);
4132 intel_context_put(ce);
4138 i915_request_get(rq);
4139 i915_request_add(rq);
4140 if (i915_request_wait(rq, 0, timeout) < 0) {
4141 GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
4142 __func__, siblings[n]->name);
4144 intel_gt_set_wedged(gt);
4147 i915_request_put(rq);
4151 igt_spinner_end(&spin);
4152 if (igt_flush_test(gt->i915))
4154 igt_spinner_fini(&spin);
4158 static int live_virtual_slice(void *arg)
4160 struct intel_gt *gt = arg;
4161 struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
4165 if (intel_uc_uses_guc_submission(&gt->uc))
4168 for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
4169 unsigned int nsibling;
4171 nsibling = __select_siblings(gt, class, siblings,
4172 intel_engine_has_timeslices);
4176 err = slicein_virtual_engine(gt, siblings, nsibling);
4180 err = sliceout_virtual_engine(gt, siblings, nsibling);
4188 static int preserved_virtual_engine(struct intel_gt *gt,
4189 struct intel_engine_cs **siblings,
4190 unsigned int nsibling)
4192 struct i915_request *last = NULL;
4193 struct intel_context *ve;
4194 struct i915_vma *scratch;
4195 struct igt_live_test t;
4201 __vm_create_scratch_for_read_pinned(&siblings[0]->gt->ggtt->vm,
4203 if (IS_ERR(scratch))
4204 return PTR_ERR(scratch);
4206 err = i915_vma_sync(scratch);
4210 ve = intel_engine_create_virtual(siblings, nsibling, 0);
4216 err = intel_context_pin(ve);
4220 err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
4224 for (n = 0; n < NUM_GPR_DW; n++) {
4225 struct intel_engine_cs *engine = siblings[n % nsibling];
4226 struct i915_request *rq;
4228 rq = i915_request_create(ve);
4234 i915_request_put(last);
4235 last = i915_request_get(rq);
4237 cs = intel_ring_begin(rq, 8);
4239 i915_request_add(rq);
4244 *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
4245 *cs++ = CS_GPR(engine, n);
4246 *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
4249 *cs++ = MI_LOAD_REGISTER_IMM(1);
4250 *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW);
4254 intel_ring_advance(rq, cs);
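		/*
		 * Each request saves the GPR seeded by its predecessor into
		 * the scratch page and seeds the next GPR in turn. As
		 * consecutive requests may run on different siblings, the
		 * values only come out right if the register state follows
		 * the virtual context from engine to engine.
		 */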
4256 /* Restrict this request to run on a particular engine */
4257 rq->execution_mask = engine->mask;
4258 i915_request_add(rq);
4261 if (i915_request_wait(last, 0, HZ / 5) < 0) {
4266 cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
4272 for (n = 0; n < NUM_GPR_DW; n++) {
4274 pr_err("Incorrect value[%d] found for GPR[%d]\n",
4281 i915_gem_object_unpin_map(scratch->obj);
4284 if (igt_live_test_end(&t))
4286 i915_request_put(last);
4288 intel_context_unpin(ve);
4290 intel_context_put(ve);
4292 i915_vma_unpin_and_release(&scratch, 0);
4296 static int live_virtual_preserved(void *arg)
4298 struct intel_gt *gt = arg;
4299 struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
4303 * Check that the context image retains non-privileged (user) registers
4304 * from one engine to the next. For this we check that the CS_GPR
4308 if (intel_uc_uses_guc_submission(&gt->uc))
4311 /* As we use CS_GPR we cannot run before they existed on all engines. */
4312 if (GRAPHICS_VER(gt->i915) < 9)
4315 for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
4318 nsibling = select_siblings(gt, class, siblings);
4322 err = preserved_virtual_engine(gt, siblings, nsibling);
4330 static int reset_virtual_engine(struct intel_gt *gt,
4331 struct intel_engine_cs **siblings,
4332 unsigned int nsibling)
4334 struct intel_engine_cs *engine;
4335 struct intel_context *ve;
4336 struct igt_spinner spin;
4337 struct i915_request *rq;
4342 * In order to support offline error capture for fast preempt reset,
4343 * we need to decouple the guilty request and ensure that it and its
4344 * descendants are not executed while the capture is in progress.
4347 if (igt_spinner_init(&spin, gt))
4350 ve = intel_engine_create_virtual(siblings, nsibling, 0);
4356 for (n = 0; n < nsibling; n++)
4357 st_engine_heartbeat_disable(siblings[n]);
4359 rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
4364 i915_request_add(rq);
4366 if (!igt_wait_for_spinner(&spin, rq)) {
4367 intel_gt_set_wedged(gt);
4372 engine = rq->engine;
4373 GEM_BUG_ON(engine == ve->engine);
4375 /* Take ownership of the reset and tasklet */
4376 err = engine_lock_reset_tasklet(engine);
4380 engine->sched_engine->tasklet.callback(&engine->sched_engine->tasklet);
4381 GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
4383 /* Fake a preemption event; failed of course */
4384 spin_lock_irq(&engine->sched_engine->lock);
4385 __unwind_incomplete_requests(engine);
4386 spin_unlock_irq(&engine->sched_engine->lock);
4387 GEM_BUG_ON(rq->engine != engine);
4389 /* Reset the engine while keeping our active request on hold */
4390 execlists_hold(engine, rq);
4391 GEM_BUG_ON(!i915_request_on_hold(rq));
4393 __intel_engine_reset_bh(engine, NULL);
4394 GEM_BUG_ON(rq->fence.error != -EIO);
4396 /* Release our grasp on the engine, letting CS flow again */
4397 engine_unlock_reset_tasklet(engine);
4399 /* Check that we do not resubmit the held request */
4400 i915_request_get(rq);
4401 if (!i915_request_wait(rq, 0, HZ / 5)) {
4402 pr_err("%s: on hold request completed!\n",
4404 intel_gt_set_wedged(gt);
4408 GEM_BUG_ON(!i915_request_on_hold(rq));
4410 /* But is resubmitted on release */
4411 execlists_unhold(engine, rq);
4412 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
4413 pr_err("%s: held request did not complete!\n",
4415 intel_gt_set_wedged(gt);
4420 i915_request_put(rq);
4422 for (n = 0; n < nsibling; n++)
4423 st_engine_heartbeat_enable(siblings[n]);
4425 intel_context_put(ve);
4427 igt_spinner_fini(&spin);
4431 static int live_virtual_reset(void *arg)
4433 struct intel_gt *gt = arg;
4434 struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
4438 * Check that we handle a reset event within a virtual engine.
4439 * Only the physical engine is reset, but we have to check the flow
4440 * of the virtual requests around the reset, and make sure it is not
4444 if (intel_uc_uses_guc_submission(&gt->uc))
4447 if (!intel_has_reset_engine(gt))
4450 for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
4453 nsibling = select_siblings(gt, class, siblings);
4457 err = reset_virtual_engine(gt, siblings, nsibling);
4465 int intel_execlists_live_selftests(struct drm_i915_private *i915)
4467 static const struct i915_subtest tests[] = {
4468 SUBTEST(live_sanitycheck),
4469 SUBTEST(live_unlite_switch),
4470 SUBTEST(live_unlite_preempt),
4471 SUBTEST(live_unlite_ring),
4472 SUBTEST(live_pin_rewind),
4473 SUBTEST(live_hold_reset),
4474 SUBTEST(live_error_interrupt),
4475 SUBTEST(live_timeslice_preempt),
4476 SUBTEST(live_timeslice_rewind),
4477 SUBTEST(live_timeslice_queue),
4478 SUBTEST(live_timeslice_nopreempt),
4479 SUBTEST(live_busywait_preempt),
4480 SUBTEST(live_preempt),
4481 SUBTEST(live_late_preempt),
4482 SUBTEST(live_nopreempt),
4483 SUBTEST(live_preempt_cancel),
4484 SUBTEST(live_suppress_self_preempt),
4485 SUBTEST(live_chain_preempt),
4486 SUBTEST(live_preempt_ring),
4487 SUBTEST(live_preempt_gang),
4488 SUBTEST(live_preempt_timeout),
4489 SUBTEST(live_preempt_user),
4490 SUBTEST(live_preempt_smoke),
4491 SUBTEST(live_virtual_engine),
4492 SUBTEST(live_virtual_mask),
4493 SUBTEST(live_virtual_preserved),
4494 SUBTEST(live_virtual_slice),
4495 SUBTEST(live_virtual_reset),
4498 if (to_gt(i915)->submission_method != INTEL_SUBMISSION_ELSP)
4501 if (intel_gt_is_wedged(to_gt(i915)))
4504 return intel_gt_live_subtests(tests, to_gt(i915));