/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "../i915_selftest.h"
#include "igt_flush_test.h"
#include "igt_spinner.h"
#include "i915_random.h"

#include "mock_context.h"

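/*
 * Smoke test the execlists submission path: on each engine, submit a
 * spinning request, check that it starts executing, then end the spinner
 * and flush before moving on.
 */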
static int live_sanitycheck(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	struct igt_spinner spin;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin, i915))
		goto err_unlock;

	ctx = kernel_context(i915);
	if (!ctx)
		goto err_spin;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin, rq)) {
			GEM_TRACE("spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx;
		}

		igt_spinner_end(&spin);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx;
		}
	}

	err = 0;
err_ctx:
	kernel_context_close(ctx);
err_spin:
	igt_spinner_fini(&spin);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

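/*
 * Block each engine with a spinner from a minimum-priority context, then
 * submit a spinner from a maximum-priority context and verify that the
 * high-priority request preempts the spinner and starts running.
 */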
static int live_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

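/*
 * Submit both spinners at default priority, confirm the second does not
 * overtake the first, then raise the second request's priority via
 * engine->schedule() and check that the late priority bump forces
 * preemption after submission.
 */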
static int live_late_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	struct i915_sched_attr attr = {};
	enum intel_engine_id id;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			pr_err("First context failed to start\n");
			goto err_wedged;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_NOOP);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("Second context overtook first?\n");
			goto err_wedged;
		}

		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
		engine->schedule(rq, &attr);

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("High priority context failed to preempt the low priority context\n");
			GEM_TRACE_DUMP();
			goto err_wedged;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&spin_hi);
	igt_spinner_end(&spin_lo);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_ctx_lo;
}

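/*
 * Inject a hang at the preemption point (preempt_hang.inject_hang), wait
 * for it to trigger, reset the engine, and verify that the high-priority
 * request still executes once the engine has recovered.
 */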
static int live_preempt_hang(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (!intel_has_reset_engine(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		init_completion(&engine->execlists.preempt_hang.completion);
		engine->execlists.preempt_hang.inject_hang = true;

		i915_request_add(rq);

		if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
						 HZ / 10)) {
			pr_err("Preemption did not occur within timeout!\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
		i915_reset_engine(engine, NULL);
		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);

		engine->execlists.preempt_hang.inject_hang = false;

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

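/*
 * PRNG helpers for the preemption smoke tests: random_range() returns a
 * value in [min, max) drawn from the test's private PRNG state.
 */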
static int random_range(struct rnd_state *rnd, int min, int max)
{
	return i915_prandom_u32_max_state(max - min, rnd) + min;
}

static int random_priority(struct rnd_state *rnd)
{
	return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
}

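/*
 * Shared state for the preemption smoke tests: a pool of kernel contexts,
 * an optional batch object full of MI_ARB_CHECKs, the engine to submit to,
 * the PRNG state, and a counter of requests submitted.
 */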
struct preempt_smoke {
	struct drm_i915_private *i915;
	struct i915_gem_context **contexts;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_object *batch;
	unsigned int ncontext;
	struct rnd_state prng;
	unsigned long count;
};

static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
{
	return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
							  &smoke->prng)];
}

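/*
 * Submit one request from @ctx at priority @prio on smoke->engine,
 * optionally executing @batch bound into the context's ppGTT.
 */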
static int smoke_submit(struct preempt_smoke *smoke,
			struct i915_gem_context *ctx, int prio,
			struct drm_i915_gem_object *batch)
{
	struct i915_request *rq;
	struct i915_vma *vma = NULL;
	int err = 0;

	if (batch) {
		vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			return err;
	}

	ctx->sched.priority = prio;

	rq = i915_request_alloc(smoke->engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin;
	}

	if (vma) {
		err = rq->engine->emit_bb_start(rq,
						vma->node.start,
						PAGE_SIZE, 0);
		if (!err)
			err = i915_vma_move_to_active(vma, rq, 0);
	}

	i915_request_add(rq);

unpin:
	if (vma)
		i915_vma_unpin(vma);

	return err;
}

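/*
 * Per-engine thread body: keep submitting requests from random contexts,
 * stepping the priority up through 0..I915_PRIORITY_MAX before wrapping,
 * until the test timeout expires.
 */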
static int smoke_crescendo_thread(void *arg)
{
	struct preempt_smoke *smoke = arg;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		struct i915_gem_context *ctx = smoke_context(smoke);
		int err;

		mutex_lock(&smoke->i915->drm.struct_mutex);
		err = smoke_submit(smoke,
				   ctx, count % I915_PRIORITY_MAX,
				   smoke->batch);
		mutex_unlock(&smoke->i915->drm.struct_mutex);
		if (err)
			return err;

		count++;
	} while (!__igt_timeout(end_time, NULL));

	smoke->count = count;
	return 0;
}

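/*
 * Spawn one submission thread per engine so that every engine receives a
 * parallel stream of requests with steadily increasing priority.
 */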
static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
#define BATCH BIT(0)
{
	struct task_struct *tsk[I915_NUM_ENGINES] = {};
	struct preempt_smoke arg[I915_NUM_ENGINES];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long count;
	int err = 0;

	mutex_unlock(&smoke->i915->drm.struct_mutex);

	for_each_engine(engine, smoke->i915, id) {
		arg[id] = *smoke;
		arg[id].engine = engine;
		if (!(flags & BATCH))
			arg[id].batch = NULL;
		arg[id].count = 0;

		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
				      "igt/smoke:%d", id);
		if (IS_ERR(tsk[id])) {
			err = PTR_ERR(tsk[id]);
			break;
		}
		get_task_struct(tsk[id]);
	}

	count = 0;
	for_each_engine(engine, smoke->i915, id) {
		int status;

		if (IS_ERR_OR_NULL(tsk[id]))
			continue;

		status = kthread_stop(tsk[id]);
		if (status && !err)
			err = status;

		count += arg[id].count;

		put_task_struct(tsk[id]);
	}

	mutex_lock(&smoke->i915->drm.struct_mutex);

	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
		count, flags,
		INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
	return err;
}

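/*
 * Single-threaded variant: walk the engines round-robin, submitting one
 * request per engine from a random context at a random priority, until
 * the timeout expires.
 */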
static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
{
	enum intel_engine_id id;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		for_each_engine(smoke->engine, smoke->i915, id) {
			struct i915_gem_context *ctx = smoke_context(smoke);
			int err;

			err = smoke_submit(smoke,
					   ctx, random_priority(&smoke->prng),
					   flags & BATCH ? smoke->batch : NULL);
			if (err)
				return err;

			count++;
		}
	} while (!__igt_timeout(end_time, NULL));

	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
		count, flags,
		INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
	return 0;
}

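/*
 * Stress preemption: fill a page with MI_ARB_CHECKs, create a large pool
 * of contexts, then flood the engines with requests of varying priority,
 * first in a per-engine crescendo and then at random across engines, both
 * with and without the busywork batch.
 */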
static int live_preempt_smoke(void *arg)
{
	struct preempt_smoke smoke = {
		.i915 = arg,
		.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
		.ncontext = 1024,
	};
	const unsigned int phase[] = { 0, BATCH };
	int err = -ENOMEM;
	u32 *cs;
	int n;

	if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
		return 0;

	smoke.contexts = kmalloc_array(smoke.ncontext,
				       sizeof(*smoke.contexts),
				       GFP_KERNEL);
	if (!smoke.contexts)
		return -ENOMEM;

	mutex_lock(&smoke.i915->drm.struct_mutex);
	intel_runtime_pm_get(smoke.i915);

	smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
	if (IS_ERR(smoke.batch)) {
		err = PTR_ERR(smoke.batch);
		goto err_unlock;
	}

	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}
	for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
		cs[n] = MI_ARB_CHECK;
	cs[n] = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(smoke.batch);

	err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
	if (err)
		goto err_batch;

	for (n = 0; n < smoke.ncontext; n++) {
		smoke.contexts[n] = kernel_context(smoke.i915);
		if (!smoke.contexts[n]) {
			err = -ENOMEM;
			goto err_ctx;
		}
	}

	for (n = 0; n < ARRAY_SIZE(phase); n++) {
		err = smoke_crescendo(&smoke, phase[n]);
		if (err)
			goto err_ctx;

		err = smoke_random(&smoke, phase[n]);
		if (err)
			goto err_ctx;
	}

err_ctx:
	if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
		err = -EIO;

	for (n = 0; n < smoke.ncontext; n++) {
		if (!smoke.contexts[n])
			break;
		kernel_context_close(smoke.contexts[n]);
	}

err_batch:
	i915_gem_object_put(smoke.batch);
err_unlock:
	intel_runtime_pm_put(smoke.i915);
	mutex_unlock(&smoke.i915->drm.struct_mutex);
	kfree(smoke.contexts);

	return err;
}

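/*
 * Selftest entry point: register the execlists live subtests. They are
 * skipped on hardware without execlists or when the GPU is already
 * terminally wedged.
 */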
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_sanitycheck),
		SUBTEST(live_preempt),
		SUBTEST(live_late_preempt),
		SUBTEST(live_preempt_hang),
		SUBTEST(live_preempt_smoke),
	};

	if (!HAS_EXECLISTS(i915))
		return 0;

	if (i915_terminally_wedged(&i915->gpu_error))
		return 0;

	return i915_subtests(tests, i915);
}