/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
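
/* Notify callback installed on every fence created by alloc_fence() below. */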
fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
        /* Leave the fence for the caller to free it after testing */
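
/* Allocate and initialise a fence for a test; released with free_fence(). */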
static struct i915_sw_fence *alloc_fence(void)
        struct i915_sw_fence *fence;

        fence = kmalloc(sizeof(*fence), GFP_KERNEL);
        i915_sw_fence_init(fence, fence_notify);

static void free_fence(struct i915_sw_fence *fence)
        i915_sw_fence_fini(fence);
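
/* Drive a lone fence through commit, done-polling and wait. */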
static int __test_self(struct i915_sw_fence *fence)
        if (i915_sw_fence_done(fence))

        i915_sw_fence_commit(fence);
        if (!i915_sw_fence_done(fence))

        i915_sw_fence_wait(fence);
        if (!i915_sw_fence_done(fence))

static int test_self(void *arg)
        struct i915_sw_fence *fence;

        /* Test i915_sw_fence signaling and completion detection */
        fence = alloc_fence();
        ret = __test_self(fence);

static int test_dag(void *arg)
        struct i915_sw_fence *A, *B, *C;

        /* Test detection of cycles within the i915_sw_fence graphs */
        if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG))

        if (i915_sw_fence_await_sw_fence_gfp(A, A, GFP_KERNEL) != -EINVAL) {
                pr_err("recursive cycle not detected (AA)\n");

        i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
        if (i915_sw_fence_await_sw_fence_gfp(B, A, GFP_KERNEL) != -EINVAL) {
                pr_err("single depth cycle not detected (BAB)\n");

        if (i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL) == -EINVAL) {
                pr_err("invalid cycle detected\n");

        if (i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL) != -EINVAL) {
                pr_err("single depth cycle not detected (CBC)\n");

        if (i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL) != -EINVAL) {
                pr_err("cycle not detected (BA, CB, AC)\n");

        if (i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL) == -EINVAL) {
                pr_err("invalid cycle detected\n");

        i915_sw_fence_commit(A);
        i915_sw_fence_commit(B);
        i915_sw_fence_commit(C);

        if (!i915_sw_fence_done(C)) {
                pr_err("fence C not done\n");

        if (!i915_sw_fence_done(B)) {
                pr_err("fence B not done\n");

        if (!i915_sw_fence_done(A)) {
                pr_err("fence A not done\n");
static int test_AB(void *arg)
        struct i915_sw_fence *A, *B;

        /* Test i915_sw_fence (A) waiting on an event source (B) */

        ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
                pr_err("Incorrectly reported fence A was complete before await\n");

        i915_sw_fence_commit(A);
        if (i915_sw_fence_done(A))

        i915_sw_fence_commit(B);
        if (!i915_sw_fence_done(B)) {
                pr_err("Fence B is not done\n");

        if (!i915_sw_fence_done(A)) {
                pr_err("Fence A is not done\n");

static int test_ABC(void *arg)
        struct i915_sw_fence *A, *B, *C;

        /* Test a chain of fences, A waits on B who waits on C */

        ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
                pr_err("Incorrectly reported fence B was complete before await\n");

        ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
                pr_err("Incorrectly reported fence C was complete before await\n");

        i915_sw_fence_commit(A);
        if (i915_sw_fence_done(A)) {
                pr_err("Fence A completed early\n");

        i915_sw_fence_commit(B);
        if (i915_sw_fence_done(B)) {
                pr_err("Fence B completed early\n");

        if (i915_sw_fence_done(A)) {
                pr_err("Fence A completed early (after signaling B)\n");

        i915_sw_fence_commit(C);

        if (!i915_sw_fence_done(C)) {
                pr_err("Fence C not done\n");

        if (!i915_sw_fence_done(B)) {
                pr_err("Fence B not done\n");

        if (!i915_sw_fence_done(A)) {
                pr_err("Fence A not done\n");

static int test_AB_C(void *arg)
        struct i915_sw_fence *A, *B, *C;

        /* Test multiple fences (AB) waiting on a single event (C) */

        ret = i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL);

        ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);

        i915_sw_fence_commit(A);
        i915_sw_fence_commit(B);

        if (i915_sw_fence_done(A)) {
                pr_err("Fence A completed early\n");

        if (i915_sw_fence_done(B)) {
                pr_err("Fence B completed early\n");

        i915_sw_fence_commit(C);
        if (!i915_sw_fence_done(C)) {
                pr_err("Fence C not done\n");

        if (!i915_sw_fence_done(B)) {
                pr_err("Fence B not done\n");

        if (!i915_sw_fence_done(A)) {
                pr_err("Fence A not done\n");

static int test_C_AB(void *arg)
        struct i915_sw_fence *A, *B, *C;

        /* Test multiple event sources (A,B) for a single fence (C) */

        ret = i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL);

        ret = i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL);

        i915_sw_fence_commit(C);
        if (i915_sw_fence_done(C))

        i915_sw_fence_commit(A);
        i915_sw_fence_commit(B);

        if (!i915_sw_fence_done(A)) {
                pr_err("Fence A not done\n");

        if (!i915_sw_fence_done(B)) {
                pr_err("Fence B not done\n");

        if (!i915_sw_fence_done(C)) {
                pr_err("Fence C not done\n");

static int test_chain(void *arg)
        struct i915_sw_fence **fences;

        /* Test a long chain of fences */
        fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL);

        for (i = 0; i < nfences; i++) {
                fences[i] = alloc_fence();

                ret = i915_sw_fence_await_sw_fence_gfp(fences[i],
                                                       fences[i - 1],
                                                       GFP_KERNEL);

                i915_sw_fence_commit(fences[i]);

        for (i = nfences; --i; ) {
                if (i915_sw_fence_done(fences[i])) {
                        pr_err("Fence[%d] completed early\n", i);

        i915_sw_fence_commit(fences[0]);
        for (i = 0; ret == 0 && i < nfences; i++) {
                if (!i915_sw_fence_done(fences[i])) {
                        pr_err("Fence[%d] is not done\n", i);

        for (i = 0; i < nfences; i++)
                free_fence(fences[i]);
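
/* State shared between test_ipc() and its worker. */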
struct task_ipc {
        struct work_struct work;
        struct completion started;
        struct i915_sw_fence *in, *out;
        int value;
};
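
/*
 * Worker: wait for the incoming fence, post the value, then signal the
 * outgoing fence.
 */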
static void task_ipc(struct work_struct *work)
        struct task_ipc *ipc = container_of(work, typeof(*ipc), work);

        complete(&ipc->started);

        i915_sw_fence_wait(ipc->in);
        smp_store_mb(ipc->value, 1);
        i915_sw_fence_commit(ipc->out);

static int test_ipc(void *arg)
        struct task_ipc ipc;
        struct workqueue_struct *wq;

        wq = alloc_workqueue("i915-selftest", 0, 0);

        /* Test use of i915_sw_fence as an interprocess signaling mechanism */
        ipc.in = alloc_fence();
        ipc.out = alloc_fence();

        /* use a completion to avoid chicken-and-egg testing */
        init_completion(&ipc.started);

        INIT_WORK_ONSTACK(&ipc.work, task_ipc);
        queue_work(wq, &ipc.work);
        wait_for_completion(&ipc.started);

        usleep_range(1000, 2000);
        if (READ_ONCE(ipc.value)) {
                pr_err("worker updated value before i915_sw_fence was signaled\n");

        i915_sw_fence_commit(ipc.in);
        i915_sw_fence_wait(ipc.out);

        if (!READ_ONCE(ipc.value)) {
                pr_err("worker signaled i915_sw_fence before value was posted\n");

        flush_work(&ipc.work);
        destroy_work_on_stack(&ipc.work);

        destroy_workqueue(wq);
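
/* Check that a timed fence only signals once its deadline has passed. */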
static int test_timer(void *arg)
        unsigned long target, delay;
        struct timed_fence tf;

        timed_fence_init(&tf, target = jiffies);
        if (!i915_sw_fence_done(&tf.fence)) {
                pr_err("Fence with immediate expiration not signaled\n");

        timed_fence_fini(&tf);

        for_each_prime_number(delay, i915_selftest.timeout_jiffies/2) {
                timed_fence_init(&tf, target = jiffies + delay);
                if (i915_sw_fence_done(&tf.fence)) {
                        pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);

                i915_sw_fence_wait(&tf.fence);

                if (!i915_sw_fence_done(&tf.fence)) {
                        pr_err("Fence not signaled after wait\n");

                if (time_before(jiffies, target)) {
                        pr_err("Fence signaled too early, target=%lu, now=%lu\n",
                               target, jiffies);

                timed_fence_fini(&tf);

        timed_fence_fini(&tf);
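
/* A minimal mock dma_fence that the test signals by hand. */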
static const char *mock_name(struct dma_fence *fence)

static const struct dma_fence_ops mock_fence_ops = {
        .get_driver_name = mock_name,
        .get_timeline_name = mock_name,
};

static DEFINE_SPINLOCK(mock_fence_lock);

static struct dma_fence *alloc_dma_fence(void)
        struct dma_fence *dma;

        dma = kmalloc(sizeof(*dma), GFP_KERNEL);
        dma_fence_init(dma, &mock_fence_ops, &mock_fence_lock, 0, 0);
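
/*
 * Wrap the dma_fence in a committed i915_sw_fence, optionally bounded by a
 * timeout of @delay jiffies (0 means no timeout).
 */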
static struct i915_sw_fence *
wrap_dma_fence(struct dma_fence *dma, unsigned long delay)
        struct i915_sw_fence *fence;

        fence = alloc_fence();
        if (!fence)
                return ERR_PTR(-ENOMEM);

        err = i915_sw_fence_await_dma_fence(fence, dma, delay, GFP_NOWAIT);
        i915_sw_fence_commit(fence);
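
/*
 * Check an i915_sw_fence waiting on a dma_fence, both with and without a
 * timeout.
 */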
static int test_dma_fence(void *arg)
        struct i915_sw_fence *timeout = NULL, *not = NULL;
        unsigned long delay = i915_selftest.timeout_jiffies;
        unsigned long end, sleep;
        struct dma_fence *dma;

        dma = alloc_dma_fence();

        timeout = wrap_dma_fence(dma, delay);
        if (IS_ERR(timeout)) {
                err = PTR_ERR(timeout);

        not = wrap_dma_fence(dma, 0);

        if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
                pr_err("Fences immediately signaled\n");

        /* We round the timeout for the fence up to the next second */
        end = round_jiffies_up(jiffies + delay);

        sleep = jiffies_to_usecs(delay) / 3;
        usleep_range(sleep, 2 * sleep);
        if (time_after(jiffies, end)) {
                pr_debug("Slept too long, delay=%lu, (target=%lu, now=%lu) skipping\n",
                         delay, end, jiffies);

        if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
                pr_err("Fences signaled too early\n");

        if (!wait_event_timeout(timeout->wait,
                                i915_sw_fence_done(timeout),
                                2 * (end - jiffies) + 1)) {
                pr_err("Timeout fence unsignaled!\n");

        if (i915_sw_fence_done(not)) {
                pr_err("No timeout fence signaled!\n");

        dma_fence_signal(dma);

        if (!i915_sw_fence_done(timeout) || !i915_sw_fence_done(not)) {
                pr_err("Fences unsignaled\n");
        dma_fence_signal(dma);
        if (!IS_ERR_OR_NULL(timeout))
                free_fence(timeout);
        if (!IS_ERR_OR_NULL(not))
                free_fence(not);
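
/* Entry point invoked by the i915 selftest harness; runs the subtests above. */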
int i915_sw_fence_mock_selftests(void)
        static const struct i915_subtest tests[] = {
                SUBTEST(test_dma_fence),

        return i915_subtests(tests, NULL);