/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "../i915_selftest.h"
#include "igt_flush_test.h"
#include "igt_spinner.h"
#include "i915_random.h"

#include "mock_context.h"

static int live_sanitycheck(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	struct igt_spinner spin;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin, i915))
		goto err_unlock;

	ctx = kernel_context(i915);
	if (!ctx)
		goto err_spin;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin, rq)) {
			GEM_TRACE("spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx;
		}

		igt_spinner_end(&spin);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx;
		}
	}

	err = 0;
err_ctx:
	kernel_context_close(ctx);
err_spin:
	igt_spinner_fini(&spin);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

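/*
 * Submit a spinning request from a minimum priority context on each engine,
 * then submit a second spinner from a maximum priority context and check
 * that it starts running (i.e. preempts) while the low priority spinner is
 * still active.
 */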
static int live_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

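/*
 * As live_preempt, but both contexts are left at their default priority.
 * Only after the second spinner has been queued (and verified not to have
 * overtaken the first) is its priority raised via engine->schedule(), so
 * preemption has to be triggered after the request is already in flight.
 */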
static int live_late_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	struct i915_sched_attr attr = {};
	enum intel_engine_id id;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			pr_err("First context failed to start\n");
			goto err_wedged;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_NOOP);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("Second context overtook first?\n");
			goto err_wedged;
		}

		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
		engine->schedule(rq, &attr);

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("High priority context failed to preempt the low priority context\n");
			GEM_TRACE_DUMP();
			goto err_wedged;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&spin_hi);
	igt_spinner_end(&spin_lo);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_ctx_lo;
}

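/*
 * Inject a hang into the preemption sequence itself: arm
 * execlists.preempt_hang before submitting the high priority request, wait
 * for the injected hang to be signalled, reset the engine and then check
 * that the high priority spinner still gets to run.
 */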
static int live_preempt_hang(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (!intel_has_reset_engine(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		init_completion(&engine->execlists.preempt_hang.completion);
		engine->execlists.preempt_hang.inject_hang = true;

		i915_request_add(rq);

		if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
						 HZ / 10)) {
			pr_err("Preemption did not occur within timeout!\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
		i915_reset_engine(engine, NULL);
		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);

		engine->execlists.preempt_hang.inject_hang = false;

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

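/*
 * Preemption smoke test helpers: submit requests from a large pool of
 * contexts, with either randomised or steadily cycling priorities, with and
 * without a real batch buffer, to stress the preemption machinery.
 */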
static int random_range(struct rnd_state *rnd, int min, int max)
{
	return i915_prandom_u32_max_state(max - min, rnd) + min;
}

static int random_priority(struct rnd_state *rnd)
{
	return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
}

struct preempt_smoke {
	struct drm_i915_private *i915;
	struct i915_gem_context **contexts;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_object *batch;
	unsigned int ncontext;
	struct rnd_state prng;
	unsigned long count;
};

static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
{
	return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
							  &smoke->prng)];
}

static int smoke_submit(struct preempt_smoke *smoke,
			struct i915_gem_context *ctx, int prio,
			struct drm_i915_gem_object *batch)
{
	struct i915_request *rq;
	struct i915_vma *vma = NULL;
	int err = 0;

	if (batch) {
		vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			return err;
	}

	ctx->sched.priority = prio;

	rq = i915_request_alloc(smoke->engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin;
	}

	if (vma) {
		err = rq->engine->emit_bb_start(rq,
						vma->node.start,
						PAGE_SIZE, 0);
		if (!err)
			err = i915_vma_move_to_active(vma, rq, 0);
	}

	i915_request_add(rq);

unpin:
	if (vma)
		i915_vma_unpin(vma);

	return err;
}

static int smoke_crescendo_thread(void *arg)
{
	struct preempt_smoke *smoke = arg;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		struct i915_gem_context *ctx = smoke_context(smoke);
		int err;

		mutex_lock(&smoke->i915->drm.struct_mutex);
		err = smoke_submit(smoke,
				   ctx, count % I915_PRIORITY_MAX,
				   smoke->batch);
		mutex_unlock(&smoke->i915->drm.struct_mutex);
		if (err)
			return err;

		count++;
	} while (!__igt_timeout(end_time, NULL));

	smoke->count = count;
	return 0;
}

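/*
 * Crescendo flavour: one kthread per engine, each repeatedly submitting
 * with a priority that cycles through the user priority range.
 */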
static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
#define BATCH BIT(0)
{
	struct task_struct *tsk[I915_NUM_ENGINES] = {};
	struct preempt_smoke arg[I915_NUM_ENGINES];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long count;
	int err = 0;

	mutex_unlock(&smoke->i915->drm.struct_mutex);

	for_each_engine(engine, smoke->i915, id) {
		arg[id] = *smoke;
		arg[id].engine = engine;
		if (!(flags & BATCH))
			arg[id].batch = NULL;
		arg[id].count = 0;

		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
				      "igt/smoke:%d", id);
		if (IS_ERR(tsk[id])) {
			err = PTR_ERR(tsk[id]);
			break;
		}
		get_task_struct(tsk[id]);
	}

	count = 0;
	for_each_engine(engine, smoke->i915, id) {
		int status;

		if (IS_ERR_OR_NULL(tsk[id]))
			continue;

		status = kthread_stop(tsk[id]);
		if (status && !err)
			err = status;

		count += arg[id].count;

		put_task_struct(tsk[id]);
	}

	mutex_lock(&smoke->i915->drm.struct_mutex);

	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
		count, flags,
		INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
	return err;
}

static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
{
	enum intel_engine_id id;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		for_each_engine(smoke->engine, smoke->i915, id) {
			struct i915_gem_context *ctx = smoke_context(smoke);
			int err;

			err = smoke_submit(smoke,
					   ctx, random_priority(&smoke->prng),
					   flags & BATCH ? smoke->batch : NULL);
			if (err)
				return err;

			count++;
		}
	} while (!__igt_timeout(end_time, NULL));

	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
		count, flags,
		INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
	return 0;
}

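/*
 * The optional batch is a page of MI_ARB_CHECK terminated by
 * MI_BATCH_BUFFER_END, so a request executing on the GPU always has an
 * arbitration point at which it may be preempted mid-batch.
 */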
static int live_preempt_smoke(void *arg)
{
	struct preempt_smoke smoke = {
		.i915 = arg,
		.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
		.ncontext = 1024,
	};
	const unsigned int phase[] = { 0, BATCH };
	int err = -ENOMEM;
	u32 *cs;
	int n;

	if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
		return 0;

	smoke.contexts = kmalloc_array(smoke.ncontext,
				       sizeof(*smoke.contexts),
				       GFP_KERNEL);
	if (!smoke.contexts)
		return -ENOMEM;

	mutex_lock(&smoke.i915->drm.struct_mutex);
	intel_runtime_pm_get(smoke.i915);

	smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
	if (IS_ERR(smoke.batch)) {
		err = PTR_ERR(smoke.batch);
		goto err_unlock;
	}

	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}
	for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
		cs[n] = MI_ARB_CHECK;
	cs[n] = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(smoke.batch);

	err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
	if (err)
		goto err_batch;

	for (n = 0; n < smoke.ncontext; n++) {
		smoke.contexts[n] = kernel_context(smoke.i915);
		if (!smoke.contexts[n])
			goto err_ctx;
	}

	for (n = 0; n < ARRAY_SIZE(phase); n++) {
		err = smoke_crescendo(&smoke, phase[n]);
		if (err)
			goto err_ctx;

		err = smoke_random(&smoke, phase[n]);
		if (err)
			goto err_ctx;
	}

err_ctx:
	if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
		err = -EIO;

	for (n = 0; n < smoke.ncontext; n++) {
		if (!smoke.contexts[n])
			break;
		kernel_context_close(smoke.contexts[n]);
	}

err_batch:
	i915_gem_object_put(smoke.batch);
err_unlock:
	intel_runtime_pm_put(smoke.i915);
	mutex_unlock(&smoke.i915->drm.struct_mutex);
	kfree(smoke.contexts);

	return err;
}

int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_sanitycheck),
		SUBTEST(live_preempt),
		SUBTEST(live_late_preempt),
		SUBTEST(live_preempt_hang),
		SUBTEST(live_preempt_smoke),
	};

	if (!HAS_EXECLISTS(i915))
		return 0;

	if (i915_terminally_wedged(&i915->gpu_error))
		return 0;

	return i915_subtests(tests, i915);
}