/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/kref.h>
#include <linux/string_helpers.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"
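
/*
 * live_active couples an i915_active with a kref and a "retired" flag so
 * the tests below can observe when the active/retire callbacks fire.
 */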
struct live_active {
	struct i915_active base;
	struct kref ref;
	bool retired;
};

static void __live_get(struct live_active *active)
{
	kref_get(&active->ref);
}

static void __live_free(struct live_active *active)
{
	i915_active_fini(&active->base);
	kfree(active);
}

static void __live_release(struct kref *ref)
{
	struct live_active *active = container_of(ref, typeof(*active), ref);

	__live_free(active);
}

static void __live_put(struct live_active *active)
{
	kref_put(&active->ref, __live_release);
}
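
/*
 * i915_active callbacks: __live_active() runs on first acquire and takes a
 * reference; __live_retire() runs once all tracked requests have retired,
 * setting the retired flag and dropping that reference.
 */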
static int __live_active(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	__live_get(active);
	return 0;
}

static void __live_retire(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	active->retired = true;
	__live_put(active);
}

static struct live_active *__live_alloc(struct drm_i915_private *i915)
{
	struct live_active *active;

	active = kzalloc(sizeof(*active), GFP_KERNEL);
	if (!active)
		return NULL;

	kref_init(&active->ref);
	i915_active_init(&active->base, __live_active, __live_retire, 0);

	return active;
}
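
/*
 * Submit one kernel request per engine, all held back by a single heap
 * fence, and track them with the i915_active. The tracker must not report
 * retirement before the fence is committed and every request has retired.
 */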
static struct live_active *
__live_active_setup(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	struct i915_sw_fence *submit;
	struct live_active *active;
	unsigned int count = 0;
	int err = 0;

	active = __live_alloc(i915);
	if (!active)
		return ERR_PTR(-ENOMEM);

	submit = heap_fence_create(GFP_KERNEL);
	if (!submit) {
		kfree(active);
		return ERR_PTR(-ENOMEM);
	}

	err = i915_active_acquire(&active->base);
	if (err)
		goto out;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			break;
		}

		err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
						       submit,
						       GFP_KERNEL);
		if (err >= 0)
			err = i915_active_add_request(&active->base, rq);
		i915_request_add(rq);
		if (err) {
			pr_err("Failed to track active ref!\n");
			break;
		}

		count++;
	}

	i915_active_release(&active->base);
	if (READ_ONCE(active->retired) && count) {
		pr_err("i915_active retired before submission!\n");
		err = -EINVAL;
	}
	if (atomic_read(&active->base.count) != count) {
		pr_err("i915_active not tracking all requests, found %d, expected %d\n",
		       atomic_read(&active->base.count), count);
		err = -EINVAL;
	}

out:
	i915_sw_fence_commit(submit);
	heap_fence_put(submit);
	if (err) {
		__live_put(active);
		active = ERR_PTR(err);
	}

	return active;
}

static int live_active_wait(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active *active;
	int err = 0;

	/* Check that we get a callback when requests retire upon waiting */

	active = __live_active_setup(i915);
	if (IS_ERR(active))
		return PTR_ERR(active);

	__i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
	if (!READ_ONCE(active->retired)) {
		struct drm_printer p = drm_err_printer(__func__);

		pr_err("i915_active not retired after waiting!\n");
		i915_active_print(&active->base, &p);

		err = -EINVAL;
	}

	__live_put(active);

	if (igt_flush_test(i915))
		err = -EIO;

	return err;
}

static int live_active_retire(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active *active;
	int err = 0;

	/* Check that we get a callback when requests are indirectly retired */

	active = __live_active_setup(i915);
	if (IS_ERR(active))
		return PTR_ERR(active);

	/* waits for & retires all requests */
	if (igt_flush_test(i915))
		err = -EIO;

	if (!READ_ONCE(active->retired)) {
		struct drm_printer p = drm_err_printer(__func__);

		pr_err("i915_active not retired after flushing!\n");
		i915_active_print(&active->base, &p);

		err = -EINVAL;
	}

	__live_put(active);

	return err;
}

static int live_active_barrier(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_active *active;
	int err = 0;

	/* Check that we get a callback when the engine idle barriers retire */

	active = __live_alloc(i915);
	if (!active)
		return -ENOMEM;

	err = i915_active_acquire(&active->base);
	if (err)
		goto out;

	for_each_uabi_engine(engine, i915) {
		err = i915_active_acquire_preallocate_barrier(&active->base,
							      engine);
		if (err)
			break;

		i915_active_acquire_barrier(&active->base);
	}

	i915_active_release(&active->base);
	if (err)
		goto out;

	__i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
	if (!READ_ONCE(active->retired)) {
		pr_err("i915_active not retired after flushing barriers!\n");
		err = -EINVAL;
	}

out:
	__live_put(active);

	if (igt_flush_test(i915))
		err = -EIO;

	return err;
}
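
/*
 * Live selftest entry point; skipped when the GT is already wedged as no
 * new requests can be submitted.
 */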
int i915_active_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_active_wait),
		SUBTEST(live_active_retire),
		SUBTEST(live_active_barrier),
	};

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return i915_subtests(tests, i915);
}
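
/*
 * Return the engine for a node currently acting as an idle barrier, or NULL
 * if it tracks an ordinary fence. The barrier status is re-checked after the
 * smp_rmb() to serialise against add_active_barriers().
 */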
static struct intel_engine_cs *node_to_barrier(struct active_node *it)
{
	struct intel_engine_cs *engine;

	if (!is_barrier(&it->base))
		return NULL;

	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
		return NULL;

	return engine;
}
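
/* Debug dump of an i915_active: callbacks, count, barriers and timelines. */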
void i915_active_print(struct i915_active *ref, struct drm_printer *m)
{
	drm_printf(m, "active %ps:%ps\n", ref->active, ref->retire);
	drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
	drm_printf(m, "\tpreallocated barriers? %s\n",
		   str_yes_no(!llist_empty(&ref->preallocated_barriers)));

	if (i915_active_acquire_if_busy(ref)) {
		struct active_node *it, *n;

		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			struct intel_engine_cs *engine;

			engine = node_to_barrier(it);
			if (engine) {
				drm_printf(m, "\tbarrier: %s\n", engine->name);
				continue;
			}

			if (i915_active_fence_isset(&it->base)) {
				drm_printf(m,
					   "\ttimeline: %llx\n", it->timeline);
				continue;
			}
		}

		i915_active_release(ref);
	}
}
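
/* Take and release the lock to wait out any critical section holding it. */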
static void spin_unlock_wait(spinlock_t *lock)
{
	spin_lock_irq(lock);
	spin_unlock_irq(lock);
}
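
/*
 * Steal the fence from an i915_active_fence slot and detach its signal
 * callback, dropping the tracker's count; the fence must already have
 * signalled (asserted below).
 */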
static void active_flush(struct i915_active *ref,
			 struct i915_active_fence *active)
{
	struct dma_fence *fence;

	fence = xchg(__active_fence_slot(active), NULL);
	if (!fence)
		return;

	spin_lock_irq(fence->lock);
	__list_del_entry(&active->cb.node);
	spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */
	atomic_dec(&ref->count);

	GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
}
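
/*
 * Flush all pending fence callbacks and the retire worker so the
 * i915_active can be inspected without racing against them.
 */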
void i915_active_unlock_wait(struct i915_active *ref)
{
	if (i915_active_acquire_if_busy(ref)) {
		struct active_node *it, *n;

		/* Wait for all active callbacks */
		rcu_read_lock();
		active_flush(ref, &ref->excl);
		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node)
			active_flush(ref, &it->base);
		rcu_read_unlock();

		i915_active_release(ref);
	}

	/* And wait for the retire callback */
	spin_unlock_wait(&ref->tree_lock);

	/* ... which may have been on a thread instead */
	flush_work(&ref->work);
}