// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2021 Intel Corporation
 */

#include "gt/intel_migrate.h"
#include "gt/intel_gpu_commands.h"
#include "gem/i915_gem_ttm_move.h"

#include "i915_deps.h"

#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
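
/*
 * Fill the object with an ascending u32 pattern through a CPU map, or
 * verify that a previously written pattern is still intact.
 */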
static int igt_fill_check_buffer(struct drm_i915_gem_object *obj,
				 bool fill)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int i, count = obj->base.size / sizeof(u32);
	enum i915_map_type map_type =
		i915_coherent_map_type(i915, obj, false);
	u32 *cur;
	int err = 0;

	assert_object_held(obj);
	cur = i915_gem_object_pin_map(obj, map_type);
	if (IS_ERR(cur))
		return PTR_ERR(cur);

	if (fill)
		for (i = 0; i < count; ++i)
			*cur++ = i;
	else
		for (i = 0; i < count; ++i)
			if (*cur++ != i) {
				pr_err("Object content mismatch at location %d of %d\n", i, count);
				err = -EINVAL;
				break;
			}

	i915_gem_object_unpin_map(obj);

	return err;
}
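
/*
 * Create an object in @src, fill it, migrate it to @dst within a single
 * ww transaction and verify that the contents survive the move. While its
 * pages are pinned, the object must report that it cannot migrate back
 * to @src.
 */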
static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
			      enum intel_region_id dst)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_memory_region *src_mr = i915->mm.regions[src];
	struct intel_memory_region *dst_mr = i915->mm.regions[dst];
	struct drm_i915_gem_object *obj;
	struct i915_gem_ww_ctx ww;
	int err = 0;

	GEM_BUG_ON(!src_mr);
	GEM_BUG_ON(!dst_mr);

	/* Switch object backing-store on create */
	obj = i915_gem_object_create_region(src_mr, dst_mr->min_page_size, 0, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		err = igt_fill_check_buffer(obj, true);
		if (err)
			continue;

		err = i915_gem_object_migrate(obj, &ww, dst);
		if (err)
			continue;

		err = i915_gem_object_pin_pages(obj);
		if (err)
			continue;

		if (i915_gem_object_can_migrate(obj, src))
			err = -EINVAL;

		i915_gem_object_unpin_pages(obj);
		err = i915_gem_object_wait_migration(obj, true);
		if (err)
			continue;

		err = igt_fill_check_buffer(obj, false);
	}
	i915_gem_object_put(obj);

	return err;
}
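
/* Thin wrappers selecting the source/destination region pair for each subtest. */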
static int igt_smem_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_LMEM_0, INTEL_REGION_SMEM);
}

static int igt_lmem_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_SMEM, INTEL_REGION_LMEM_0);
}

static int igt_same_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_LMEM_0, INTEL_REGION_LMEM_0);
}
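
/*
 * A single migration step: optionally (re)bind @vma, then move the object
 * to smem if it is currently in lmem (or vice versa) and sanity-check the
 * resulting backing store. Any bound vma is expected to be unbound
 * asynchronously by the migration itself.
 */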
static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
				  struct drm_i915_gem_object *obj,
				  struct i915_vma *vma,
				  bool borked_migrate)
{
	int err;

	err = i915_gem_object_lock(obj, ww);
	if (err)
		return err;

	if (vma) {
		err = i915_vma_pin_ww(vma, ww, obj->base.size, 0,
				      0UL | PIN_OFFSET_FIXED |
				      PIN_USER);
		if (err) {
			if (err != -EINTR && err != -ERESTARTSYS &&
			    err != -EDEADLK)
				pr_err("Failed to pin vma.\n");
			return err;
		}

		i915_vma_unpin(vma);
	}

	/*
	 * Migration will implicitly unbind (asynchronously) any bound
	 * vmas.
	 */
	if (i915_gem_object_is_lmem(obj)) {
		err = i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM);
		if (err) {
			if (!borked_migrate)
				pr_err("Object failed migration to smem\n");
			return err;
		}

		if (i915_gem_object_is_lmem(obj)) {
			pr_err("object still backed by lmem\n");
			err = -EINVAL;
		}

		if (!i915_gem_object_has_struct_page(obj)) {
			pr_err("object not backed by struct page\n");
			err = -EINVAL;
		}
	} else {
		err = i915_gem_object_migrate(obj, ww, INTEL_REGION_LMEM_0);
		if (err) {
			if (!borked_migrate)
				pr_err("Object failed migration to lmem\n");
			return err;
		}

		if (i915_gem_object_has_struct_page(obj)) {
			pr_err("object still backed by struct page\n");
			err = -EINVAL;
		}

		if (!i915_gem_object_is_lmem(obj)) {
			pr_err("object not backed by lmem\n");
			err = -EINVAL;
		}
	}

	return err;
}
static int __igt_lmem_pages_migrate(struct intel_gt *gt,
				    struct i915_address_space *vm,
				    struct i915_deps *deps,
				    struct igt_spinner *spin,
				    struct dma_fence *spin_fence,
				    bool borked_migrate)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma = NULL;
	struct i915_gem_ww_ctx ww;
	struct i915_request *rq;
	int err;
	int i;

	/* From LMEM to shmem and back again */

	obj = i915_gem_object_create_lmem(i915, SZ_2M, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (vm) {
		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_put;
		}
	}

	/* Initial GPU fill, sync, CPU initialization. */
	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			continue;

		err = intel_migrate_clear(&gt->migrate, &ww, deps,
					  obj->mm.pages->sgl, obj->pat_index,
					  i915_gem_object_is_lmem(obj),
					  0xdeadbeaf, &rq);
		if (rq) {
			err = dma_resv_reserve_fences(obj->base.resv, 1);
			if (!err)
				dma_resv_add_fence(obj->base.resv, &rq->fence,
						   DMA_RESV_USAGE_KERNEL);
			i915_request_put(rq);
		}
		if (err)
			continue;

		if (!vma) {
			err = igt_fill_check_buffer(obj, true);
			if (err)
				continue;
		}
	}
	if (err)
		goto out_put;

	/*
	 * Migrate to and from smem without explicitly syncing.
	 * Finalize with data in smem for fast readout.
	 */
	for (i = 1; i <= 5; ++i) {
		for_i915_gem_ww(&ww, err, true)
			err = lmem_pages_migrate_one(&ww, obj, vma,
						     borked_migrate);
		if (err)
			goto out_put;
	}

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out_put;

	if (spin) {
		if (dma_fence_is_signaled(spin_fence)) {
			pr_err("Spinner was terminated by hangcheck.\n");
			err = -EBUSY;
			goto out_unlock;
		}
		igt_spinner_end(spin);
	}

	/* Finally sync migration and check content. */
	err = i915_gem_object_wait_migration(obj, true);
	if (err)
		goto out_unlock;

	if (vma) {
		err = i915_vma_wait_for_bind(vma);
		if (err)
			goto out_unlock;
	} else {
		err = igt_fill_check_buffer(obj, false);
	}

out_unlock:
	i915_gem_object_unlock(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}
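
/*
 * Exercise the migration path while injecting simulated GPU failures and
 * allocation failures, optionally with the memcpy fallback banned, and
 * verify that the GT is wedged only in the cases where that is expected.
 */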
static int igt_lmem_pages_failsafe_migrate(void *arg)
{
	int fail_gpu, fail_alloc, ban_memcpy, ret;
	struct intel_gt *gt = arg;

	for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) {
		for (fail_alloc = 0; fail_alloc < 2; ++fail_alloc) {
			for (ban_memcpy = 0; ban_memcpy < 2; ++ban_memcpy) {
				pr_info("Simulated failure modes: gpu: %d, alloc: %d, ban_memcpy: %d\n",
					fail_gpu, fail_alloc, ban_memcpy);
				i915_ttm_migrate_set_ban_memcpy(ban_memcpy);
				i915_ttm_migrate_set_failure_modes(fail_gpu,
								   fail_alloc);
				ret = __igt_lmem_pages_migrate(gt, NULL, NULL,
							       NULL, NULL,
							       ban_memcpy &&
							       fail_gpu);

				if (ban_memcpy && fail_gpu) {
					struct intel_gt *__gt;
					unsigned int id;

					if (ret != -EIO) {
						pr_err("expected -EIO, got (%d)\n", ret);
						ret = -EINVAL;
					} else {
						ret = 0;
					}

					for_each_gt(__gt, gt->i915, id) {
						intel_wakeref_t wakeref;
						bool wedged;

						mutex_lock(&__gt->reset.mutex);
						wedged = test_bit(I915_WEDGED, &__gt->reset.flags);
						mutex_unlock(&__gt->reset.mutex);

						if (fail_gpu && !fail_alloc) {
							if (!wedged) {
								pr_err("gt(%u) not wedged\n", id);
								ret = -EINVAL;
								continue;
							}
						} else if (wedged) {
							pr_err("gt(%u) incorrectly wedged\n", id);
							ret = -EINVAL;
							continue;
						}

						wakeref = intel_runtime_pm_get(__gt->uncore->rpm);
						igt_global_reset_lock(__gt);
						intel_gt_reset(__gt, ALL_ENGINES, NULL);
						igt_global_reset_unlock(__gt);
						intel_runtime_pm_put(__gt->uncore->rpm, wakeref);
					}
				}

				if (ret)
					goto out_err;
			}
		}
	}

out_err:
	i915_ttm_migrate_set_failure_modes(false, false);
	i915_ttm_migrate_set_ban_memcpy(false);
	return ret;
}

/*
 * This subtest tests that unbinding at migration is indeed performed
 * async. We launch a spinner and a number of migrations depending on
 * that spinner to have terminated. Before each migration we bind a
 * vma, which should then be async unbound by the migration operation.
 * If we are able to schedule migrations without blocking while the
 * spinner is still running, those unbinds are indeed async and non-
 * blocking.
 *
 * Note that each async bind operation is awaiting the previous migration
 * due to the moving fence resulting from the migration.
 */
static int igt_async_migrate(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct i915_ppgtt *ppgtt;
	struct igt_spinner spin;
	int err = 0;

	ppgtt = i915_ppgtt_create(gt, 0);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (igt_spinner_init(&spin, gt)) {
		err = -ENOMEM;
		goto out_spin;
	}

	for_each_engine(engine, gt, id) {
		struct ttm_operation_ctx ctx = {
			.interruptible = true
		};
		struct dma_fence *spin_fence;
		struct intel_context *ce;
		struct i915_request *rq;
		struct i915_deps deps;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto out_ce;
		}

		/*
		 * Use MI_NOOP, making the spinner non-preemptible. If there
		 * is a code path where we fail async operation due to the
		 * running spinner, we will block and fail to end the
		 * spinner resulting in a deadlock. But with a non-
		 * preemptible spinner, hangcheck will terminate the spinner
		 * for us, and we will later detect that and fail the test.
		 */
		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		intel_context_put(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_ce;
		}

		i915_deps_init(&deps, GFP_KERNEL);
		err = i915_deps_add_dependency(&deps, &rq->fence, &ctx);
		spin_fence = dma_fence_get(&rq->fence);
		i915_request_add(rq);
		if (err)
			goto out_ce;

		err = __igt_lmem_pages_migrate(gt, &ppgtt->vm, &deps, &spin,
					       spin_fence, false);
		i915_deps_fini(&deps);
		dma_fence_put(spin_fence);
		if (err)
			goto out_ce;
	}

out_ce:
	igt_spinner_fini(&spin);
out_spin:
	i915_vm_put(&ppgtt->vm);

	return err;
}

/*
 * Setting ASYNC_FAIL_ALLOC to 2 will simulate memory allocation failure while
 * arming the migration error check and block async migration. This
 * will cause us to deadlock and hangcheck will terminate the spinner
 * causing the test to fail.
 */
#define ASYNC_FAIL_ALLOC 1
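
/*
 * Run the async-unbind migration test on every engine while injecting the
 * same simulated failure modes as igt_lmem_pages_failsafe_migrate() above.
 */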
static int igt_lmem_async_migrate(void *arg)
{
	int fail_gpu, fail_alloc, ban_memcpy, ret;
	struct intel_gt *gt = arg;

	for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) {
		for (fail_alloc = 0; fail_alloc < ASYNC_FAIL_ALLOC; ++fail_alloc) {
			for (ban_memcpy = 0; ban_memcpy < 2; ++ban_memcpy) {
				pr_info("Simulated failure modes: gpu: %d, alloc: %d, ban_memcpy: %d\n",
					fail_gpu, fail_alloc, ban_memcpy);
				i915_ttm_migrate_set_ban_memcpy(ban_memcpy);
				i915_ttm_migrate_set_failure_modes(fail_gpu,
								   fail_alloc);
				ret = igt_async_migrate(gt);

				if (fail_gpu && ban_memcpy) {
					struct intel_gt *__gt;
					unsigned int id;

					if (ret != -EIO) {
						pr_err("expected -EIO, got (%d)\n", ret);
						ret = -EINVAL;
					} else {
						ret = 0;
					}

					for_each_gt(__gt, gt->i915, id) {
						intel_wakeref_t wakeref;
						bool wedged;

						mutex_lock(&__gt->reset.mutex);
						wedged = test_bit(I915_WEDGED, &__gt->reset.flags);
						mutex_unlock(&__gt->reset.mutex);

						if (fail_gpu && !fail_alloc) {
							if (!wedged) {
								pr_err("gt(%u) not wedged\n", id);
								ret = -EINVAL;
								continue;
							}
						} else if (wedged) {
							pr_err("gt(%u) incorrectly wedged\n", id);
							ret = -EINVAL;
							continue;
						}

						wakeref = intel_runtime_pm_get(__gt->uncore->rpm);
						igt_global_reset_lock(__gt);
						intel_gt_reset(__gt, ALL_ENGINES, NULL);
						igt_global_reset_unlock(__gt);
						intel_runtime_pm_put(__gt->uncore->rpm, wakeref);
					}
				}

				if (ret)
					goto out_err;
			}
		}
	}

out_err:
	i915_ttm_migrate_set_failure_modes(false, false);
	i915_ttm_migrate_set_ban_memcpy(false);
	return ret;
}
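
/*
 * Entry point registering the subtests with the i915 live selftest
 * machinery. These are typically run by loading the driver with selftests
 * enabled (e.g. CONFIG_DRM_I915_SELFTEST and the live selftests module
 * parameter); the exact invocation depends on the kernel configuration.
 */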
int i915_gem_migrate_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_smem_create_migrate),
		SUBTEST(igt_lmem_create_migrate),
		SUBTEST(igt_same_create_migrate),
		SUBTEST(igt_lmem_pages_failsafe_migrate),
		SUBTEST(igt_lmem_async_migrate),
	};

	if (!HAS_LMEM(i915))
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}