/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

#include "mock_context.h"
#include "mock_gem_device.h"

static int igt_add_request(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_request *request;
        int err = -ENOMEM;

        /* Basic preliminary test to create a request and let it loose! */

        mutex_lock(&i915->drm.struct_mutex);
        request = mock_request(i915->engine[RCS],
                               i915->kernel_context,
                               HZ / 10);
        if (!request)
                goto out_unlock;

        i915_request_add(request);

        err = 0;
out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

static int igt_wait_request(void *arg)
{
        const long T = HZ / 4;
        struct drm_i915_private *i915 = arg;
        struct i915_request *request;
        int err = -EINVAL;

        /* Submit a request, then wait upon it */

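        /*
         * mock_request()'s third argument is a delay in jiffies: the mock
         * engine does not complete a request until that long after it has
         * been submitted, which is what the timeout checks below rely on.
         */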
        mutex_lock(&i915->drm.struct_mutex);
        request = mock_request(i915->engine[RCS], i915->kernel_context, T);
        if (!request) {
                err = -ENOMEM;
                goto out_unlock;
        }

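        /*
         * With a timeout of 0, i915_request_wait() acts as a non-blocking
         * busyness query. Before the request has been submitted, neither
         * the query nor a timed wait may report completion.
         */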
        if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
                pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
                goto out_unlock;
        }

        if (i915_request_wait(request, I915_WAIT_LOCKED, T) != -ETIME) {
                pr_err("request wait succeeded (expected timeout before submit!)\n");
                goto out_unlock;
        }

        if (i915_request_completed(request)) {
                pr_err("request completed before submit!!\n");
                goto out_unlock;
        }

        i915_request_add(request);

        if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
                pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
                goto out_unlock;
        }

        if (i915_request_completed(request)) {
                pr_err("request completed immediately!\n");
                goto out_unlock;
        }

        if (i915_request_wait(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
                pr_err("request wait succeeded (expected timeout!)\n");
                goto out_unlock;
        }

        if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
                pr_err("request wait timed out!\n");
                goto out_unlock;
        }

        if (!i915_request_completed(request)) {
                pr_err("request not complete after waiting!\n");
                goto out_unlock;
        }

        if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
                pr_err("request wait timed out when already complete!\n");
                goto out_unlock;
        }

        err = 0;
out_unlock:
        mock_device_flush(i915);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

static int igt_fence_wait(void *arg)
{
        const long T = HZ / 4;
        struct drm_i915_private *i915 = arg;
        struct i915_request *request;
        int err = -EINVAL;

        /* Submit a request, treat it as a fence and wait upon it */

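        /*
         * This test exercises the request purely through the generic
         * dma_fence interface (dma_fence_wait_timeout() and
         * dma_fence_is_signaled()), dropping struct_mutex around the waits
         * as an external fence user would.
         */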
        mutex_lock(&i915->drm.struct_mutex);
        request = mock_request(i915->engine[RCS], i915->kernel_context, T);
        if (!request) {
                err = -ENOMEM;
                goto out_locked;
        }
        mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */

        if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
                pr_err("fence wait success before submit (expected timeout)!\n");
                goto out_device;
        }

        mutex_lock(&i915->drm.struct_mutex);
        i915_request_add(request);
        mutex_unlock(&i915->drm.struct_mutex);

        if (dma_fence_is_signaled(&request->fence)) {
                pr_err("fence signaled immediately!\n");
                goto out_device;
        }

        if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
                pr_err("fence wait success after submit (expected timeout)!\n");
                goto out_device;
        }

        if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
                pr_err("fence wait timed out (expected success)!\n");
                goto out_device;
        }

        if (!dma_fence_is_signaled(&request->fence)) {
                pr_err("fence unsignaled after waiting!\n");
                goto out_device;
        }

        if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
                pr_err("fence wait timed out when complete (expected success)!\n");
                goto out_device;
        }

        err = 0;
out_device:
        mutex_lock(&i915->drm.struct_mutex);
out_locked:
        mock_device_flush(i915);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

static int igt_request_rewind(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_request *request, *vip;
        struct i915_gem_context *ctx[2];
        int err = -EINVAL;

        mutex_lock(&i915->drm.struct_mutex);
        ctx[0] = mock_context(i915, "A");
        request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
        if (!request) {
                err = -ENOMEM;
                goto err_context_0;
        }

        i915_request_get(request);
        i915_request_add(request);

        ctx[1] = mock_context(i915, "B");
        vip = mock_request(i915->engine[RCS], ctx[1], 0);
        if (!vip) {
                err = -ENOMEM;
                goto err_context_1;
        }

        /* Simulate preemption by manual reordering */
        if (!mock_cancel_request(request)) {
                pr_err("failed to cancel request (already executed)!\n");
                i915_request_add(vip);
                goto err_context_1;
        }
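        /*
         * With the slow request cancelled (pulled back off the mock engine),
         * submitting vip first and then resubmitting the original by hand
         * reverses their order of execution: vip, with no delay, should
         * complete while the 2s request is still busy.
         */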
        i915_request_get(vip);
        i915_request_add(vip);
        rcu_read_lock();
        request->engine->submit_request(request);
        rcu_read_unlock();

        mutex_unlock(&i915->drm.struct_mutex);

        if (i915_request_wait(vip, 0, HZ) == -ETIME) {
                pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
                       vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
                goto err;
        }

        if (i915_request_completed(request)) {
                pr_err("low priority request already completed\n");
                goto err;
        }

        err = 0;
err:
        i915_request_put(vip);
        mutex_lock(&i915->drm.struct_mutex);
err_context_1:
        mock_context_close(ctx[1]);
        i915_request_put(request);
err_context_0:
        mock_context_close(ctx[0]);
        mock_device_flush(i915);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

int i915_request_mock_selftests(void)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_add_request),
                SUBTEST(igt_wait_request),
                SUBTEST(igt_fence_wait),
                SUBTEST(igt_request_rewind),
        };
        struct drm_i915_private *i915;
        int err;

        i915 = mock_gem_device();
        if (!i915)
                return -ENOMEM;

        err = i915_subtests(tests, i915);
        drm_dev_unref(&i915->drm);

        return err;
}

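/*
 * The tests above run entirely against the mock (software) device; the
 * remainder are live tests requiring real hardware. struct live_test
 * snapshots the GPU reset count and missed-interrupt mask beforehand so
 * that end_live_test() can verify a subtest left the GPU idle and did
 * not provoke a reset or miss an interrupt.
 */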
struct live_test {
        struct drm_i915_private *i915;
        const char *func;
        const char *name;

        unsigned int reset_count;
};

static int begin_live_test(struct live_test *t,
                           struct drm_i915_private *i915,
                           const char *func,
                           const char *name)
{
        int err;

        t->i915 = i915;
        t->func = func;
        t->name = name;

        err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
        if (err) {
                pr_err("%s(%s): failed to idle before, with err=%d!\n",
                       func, name, err);
                return err;
        }

        i915->gpu_error.missed_irq_rings = 0;
        t->reset_count = i915_reset_count(&i915->gpu_error);

        return 0;
}

static int end_live_test(struct live_test *t)
{
        struct drm_i915_private *i915 = t->i915;

        i915_retire_requests(i915);

        if (wait_for(intel_engines_are_idle(i915), 10)) {
                pr_err("%s(%s): GPU not idle\n", t->func, t->name);
                return -EIO;
        }

        if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
                pr_err("%s(%s): GPU was reset %d times!\n",
                       t->func, t->name,
                       i915_reset_count(&i915->gpu_error) - t->reset_count);
                return -EIO;
        }

        if (i915->gpu_error.missed_irq_rings) {
                pr_err("%s(%s): Missed interrupts on engines %lx\n",
                       t->func, t->name, i915->gpu_error.missed_irq_rings);
                return -EIO;
        }

        return 0;
}

static int live_nop_request(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct live_test t;
        unsigned int id;
        int err = -ENODEV;

        /* Submit various sized batches of nop requests (carrying no
         * batchbuffer at all) to each engine individually, and wait for
         * each batch to complete. This measures the pure overhead of
         * allocating and submitting requests to the hardware.
         */

        mutex_lock(&i915->drm.struct_mutex);

        for_each_engine(engine, i915, id) {
                IGT_TIMEOUT(end_time);
                struct i915_request *request;
                unsigned long n, prime;
                ktime_t times[2] = {};

                err = begin_live_test(&t, i915, __func__, engine->name);
                if (err)
                        goto out_unlock;

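                /*
                 * Step the batch size through the primes: times[0] records
                 * the walltime of a single request, while times[1] / prime
                 * gives the amortised cost per request of the last
                 * (largest) batch completed before the selftest timeout.
                 */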
                for_each_prime_number_from(prime, 1, 8192) {
                        times[1] = ktime_get_raw();

                        for (n = 0; n < prime; n++) {
                                request = i915_request_alloc(engine,
                                                             i915->kernel_context);
                                if (IS_ERR(request)) {
                                        err = PTR_ERR(request);
                                        goto out_unlock;
                                }

                                /* This space is left intentionally blank.
                                 *
                                 * We do not actually want to perform any
                                 * action with this request, we just want
                                 * to measure the latency in allocation
                                 * and submission of our breadcrumbs -
                                 * ensuring that the bare request is sufficient
                                 * for the system to work (i.e. proper HEAD
                                 * tracking of the rings, interrupt handling,
                                 * etc). It also gives us the lowest bounds
                                 * for latency.
                                 */

                                i915_request_add(request);
                        }
                        i915_request_wait(request,
                                          I915_WAIT_LOCKED,
                                          MAX_SCHEDULE_TIMEOUT);

                        times[1] = ktime_sub(ktime_get_raw(), times[1]);
                        if (prime == 1)
                                times[0] = times[1];

                        if (__igt_timeout(end_time, NULL))
                                break;
                }

                err = end_live_test(&t);
                if (err)
                        goto out_unlock;

                pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
                        engine->name,
                        ktime_to_ns(times[0]),
                        prime, div64_u64(ktime_to_ns(times[1]), prime));
        }

out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

static struct i915_vma *empty_batch(struct drm_i915_private *i915)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        u32 *cmd;
        int err;

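        /*
         * Build the smallest valid batchbuffer: a single page whose first
         * dword is MI_BATCH_BUFFER_END, pinned into the global GTT so any
         * engine can execute it.
         */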
        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(cmd)) {
                err = PTR_ERR(cmd);
                goto err;
        }

        *cmd = MI_BATCH_BUFFER_END;
        i915_gem_chipset_flush(i915);

        i915_gem_object_unpin_map(obj);

        err = i915_gem_object_set_to_gtt_domain(obj, false);
        if (err)
                goto err;

        vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
        if (err)
                goto err;

        return vma;

err:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}

static struct i915_request *
empty_request(struct intel_engine_cs *engine,
              struct i915_vma *batch)
{
        struct i915_request *request;
        int err;

        request = i915_request_alloc(engine, engine->i915->kernel_context);
        if (IS_ERR(request))
                return request;

        err = engine->emit_bb_start(request,
                                    batch->node.start,
                                    batch->node.size,
                                    I915_DISPATCH_SECURE);
        if (err)
                goto out_request;

out_request:
        __i915_request_add(request, err == 0);
        return err ? ERR_PTR(err) : request;
}

static int live_empty_request(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct live_test t;
        struct i915_vma *batch;
        unsigned int id;
        int err = 0;

        /* Submit various sized batches of minimal (MI_BATCH_BUFFER_END
         * only) batchbuffers to each engine individually, and wait for
         * each batch to complete. This measures the overhead of executing
         * a trivial batch on top of plain request submission.
         */

        mutex_lock(&i915->drm.struct_mutex);

        batch = empty_batch(i915);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
                goto out_unlock;
        }

        for_each_engine(engine, i915, id) {
                IGT_TIMEOUT(end_time);
                struct i915_request *request;
                unsigned long n, prime;
                ktime_t times[2] = {};

                err = begin_live_test(&t, i915, __func__, engine->name);
                if (err)
                        goto out_batch;

                /* Warmup / preload */
                request = empty_request(engine, batch);
                if (IS_ERR(request)) {
                        err = PTR_ERR(request);
                        goto out_batch;
                }
                i915_request_wait(request,
                                  I915_WAIT_LOCKED,
                                  MAX_SCHEDULE_TIMEOUT);

                for_each_prime_number_from(prime, 1, 8192) {
                        times[1] = ktime_get_raw();

                        for (n = 0; n < prime; n++) {
                                request = empty_request(engine, batch);
                                if (IS_ERR(request)) {
                                        err = PTR_ERR(request);
                                        goto out_batch;
                                }
                        }
                        i915_request_wait(request,
                                          I915_WAIT_LOCKED,
                                          MAX_SCHEDULE_TIMEOUT);

                        times[1] = ktime_sub(ktime_get_raw(), times[1]);
                        if (prime == 1)
                                times[0] = times[1];

                        if (__igt_timeout(end_time, NULL))
                                break;
                }

                err = end_live_test(&t);
                if (err)
                        goto out_batch;

                pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n",
                        engine->name,
                        ktime_to_ns(times[0]),
                        prime, div64_u64(ktime_to_ns(times[1]), prime));
        }

out_batch:
        i915_vma_unpin(batch);
        i915_vma_put(batch);
out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
        struct i915_gem_context *ctx = i915->kernel_context;
        struct i915_address_space *vm =
                ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
        struct drm_i915_gem_object *obj;
        const int gen = INTEL_GEN(i915);
        struct i915_vma *vma;
        u32 *cmd;
        int err;

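        /*
         * The batch begins with an MI_BATCH_BUFFER_START that branches back
         * to its own address, so once started it spins forever. This keeps
         * a request busy for as long as a test needs, until
         * recursive_batch_resolve() rewrites the first dword into
         * MI_BATCH_BUFFER_END and lets it complete.
         */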
        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                goto err;

        err = i915_gem_object_set_to_wc_domain(obj, true);
        if (err)
                goto err;

        cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
        if (IS_ERR(cmd)) {
                err = PTR_ERR(cmd);
                goto err;
        }

        if (gen >= 8) {
                *cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
                *cmd++ = lower_32_bits(vma->node.start);
                *cmd++ = upper_32_bits(vma->node.start);
        } else if (gen >= 6) {
                *cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
                *cmd++ = lower_32_bits(vma->node.start);
        } else if (gen >= 4) {
                *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
                *cmd++ = lower_32_bits(vma->node.start);
        } else {
                *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | 1;
                *cmd++ = lower_32_bits(vma->node.start);
        }
        *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
        i915_gem_chipset_flush(i915);

        i915_gem_object_unpin_map(obj);

        return vma;

err:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}

static int recursive_batch_resolve(struct i915_vma *batch)
{
        u32 *cmd;

        cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
        if (IS_ERR(cmd))
                return PTR_ERR(cmd);

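        /* Replace the self-referencing MI_BATCH_BUFFER_START with a
         * terminator, allowing the spinning batch to retire.
         */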
        *cmd = MI_BATCH_BUFFER_END;
        i915_gem_chipset_flush(batch->vm->i915);

        i915_gem_object_unpin_map(batch->obj);

        return 0;
}

static int live_all_engines(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct i915_request *request[I915_NUM_ENGINES] = {};
        struct i915_vma *batch;
        struct live_test t;
        unsigned int id;
        int err;

        /* Check we can submit requests to all engines simultaneously. We
         * send a recursive batch to each engine - checking that we don't
         * block doing so, and that they don't complete too soon.
         */

        mutex_lock(&i915->drm.struct_mutex);

        err = begin_live_test(&t, i915, __func__, "");
        if (err)
                goto out_unlock;

        batch = recursive_batch(i915);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
                pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
                goto out_unlock;
        }

        for_each_engine(engine, i915, id) {
                request[id] = i915_request_alloc(engine, i915->kernel_context);
                if (IS_ERR(request[id])) {
                        err = PTR_ERR(request[id]);
                        request[id] = NULL; /* don't put an ERR_PTR on cleanup */
                        pr_err("%s: Request allocation failed with err=%d\n",
                               __func__, err);
                        goto out_request;
                }

                err = engine->emit_bb_start(request[id],
                                            batch->node.start,
                                            batch->node.size,
                                            0);
                GEM_BUG_ON(err);
                request[id]->batch = batch;

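                /*
                 * Take an extra reference and mark the batch object active
                 * on first use so that it stays alive until every request
                 * using it has retired.
                 */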
                if (!i915_gem_object_has_active_reference(batch->obj)) {
                        i915_gem_object_get(batch->obj);
                        i915_gem_object_set_active_reference(batch->obj);
                }

                i915_vma_move_to_active(batch, request[id], 0);
                i915_request_get(request[id]);
                i915_request_add(request[id]);
        }

        for_each_engine(engine, i915, id) {
                if (i915_request_completed(request[id])) {
                        pr_err("%s(%s): request completed too early!\n",
                               __func__, engine->name);
                        err = -EINVAL;
                        goto out_request;
                }
        }

        err = recursive_batch_resolve(batch);
        if (err) {
                pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
                goto out_request;
        }

        for_each_engine(engine, i915, id) {
                long timeout;

                timeout = i915_request_wait(request[id],
                                            I915_WAIT_LOCKED,
                                            MAX_SCHEDULE_TIMEOUT);
                if (timeout < 0) {
                        err = timeout;
                        pr_err("%s: error waiting for request on %s, err=%d\n",
                               __func__, engine->name, err);
                        goto out_request;
                }

                GEM_BUG_ON(!i915_request_completed(request[id]));
                i915_request_put(request[id]);
                request[id] = NULL;
        }

        err = end_live_test(&t);

out_request:
        for_each_engine(engine, i915, id)
                if (request[id])
                        i915_request_put(request[id]);
        i915_vma_unpin(batch);
        i915_vma_put(batch);
out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

static int live_sequential_engines(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_request *request[I915_NUM_ENGINES] = {};
        struct i915_request *prev = NULL;
        struct intel_engine_cs *engine;
        struct live_test t;
        unsigned int id;
        int err;

        /* Check we can submit requests to all engines sequentially, such
         * that each successive request waits for the earlier ones. This
         * tests that we don't execute requests out of order, even though
         * they are running on independent engines.
         */

        mutex_lock(&i915->drm.struct_mutex);

        err = begin_live_test(&t, i915, __func__, "");
        if (err)
                goto out_unlock;

        for_each_engine(engine, i915, id) {
                struct i915_vma *batch;

                batch = recursive_batch(i915);
                if (IS_ERR(batch)) {
                        err = PTR_ERR(batch);
                        pr_err("%s: Unable to create batch for %s, err=%d\n",
                               __func__, engine->name, err);
                        goto out_unlock;
                }

                request[id] = i915_request_alloc(engine, i915->kernel_context);
                if (IS_ERR(request[id])) {
                        err = PTR_ERR(request[id]);
                        request[id] = NULL; /* don't deref an ERR_PTR on cleanup */
                        pr_err("%s: Request allocation failed for %s with err=%d\n",
                               __func__, engine->name, err);
                        goto out_request;
                }

                if (prev) {
                        err = i915_request_await_dma_fence(request[id],
                                                           &prev->fence);
                        if (err) {
                                i915_request_add(request[id]);
                                pr_err("%s: Request await failed for %s with err=%d\n",
                                       __func__, engine->name, err);
                                goto out_request;
                        }
                }

                err = engine->emit_bb_start(request[id],
                                            batch->node.start,
                                            batch->node.size,
                                            0);
                GEM_BUG_ON(err);
                request[id]->batch = batch;

                i915_vma_move_to_active(batch, request[id], 0);
                i915_gem_object_set_active_reference(batch->obj);
                i915_vma_get(batch);

                i915_request_get(request[id]);
                i915_request_add(request[id]);

                prev = request[id];
        }

        for_each_engine(engine, i915, id) {
                long timeout;

                if (i915_request_completed(request[id])) {
                        pr_err("%s(%s): request completed too early!\n",
                               __func__, engine->name);
                        err = -EINVAL;
                        goto out_request;
                }

                err = recursive_batch_resolve(request[id]->batch);
                if (err) {
                        pr_err("%s: failed to resolve batch, err=%d\n",
                               __func__, err);
                        goto out_request;
                }

                timeout = i915_request_wait(request[id],
                                            I915_WAIT_LOCKED,
                                            MAX_SCHEDULE_TIMEOUT);
                if (timeout < 0) {
                        err = timeout;
                        pr_err("%s: error waiting for request on %s, err=%d\n",
                               __func__, engine->name, err);
                        goto out_request;
                }

                GEM_BUG_ON(!i915_request_completed(request[id]));
        }

        err = end_live_test(&t);

out_request:
        for_each_engine(engine, i915, id) {
                u32 *cmd;

                if (!request[id])
                        break;

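                /*
                 * If we aborted before resolving, the batch may still be
                 * spinning on the GPU; poke in MI_BATCH_BUFFER_END so it
                 * can complete before we drop our references.
                 */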
                cmd = i915_gem_object_pin_map(request[id]->batch->obj,
                                              I915_MAP_WC);
                if (!IS_ERR(cmd)) {
                        *cmd = MI_BATCH_BUFFER_END;
                        i915_gem_chipset_flush(i915);

                        i915_gem_object_unpin_map(request[id]->batch->obj);
                }

                i915_vma_put(request[id]->batch);
                i915_request_put(request[id]);
        }
out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

int i915_request_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_nop_request),
                SUBTEST(live_all_engines),
                SUBTEST(live_sequential_engines),
                SUBTEST(live_empty_request),
        };
        return i915_subtests(tests, i915);
}