// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
 * Copyright © 2023 Intel Corporation
 */
#include <linux/delay.h>
#include <linux/kthread.h>

#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_kunit_helpers.h"
#include "ttm_mock_manager.h"

/* BO_SIZE is an assumed value; the original define was lost in extraction. */
#define BO_SIZE		SZ_4K
#define MANAGER_SIZE	SZ_1M

static struct spinlock fence_lock;

struct ttm_bo_validate_test_case {
	const char *description;
	enum ttm_bo_type bo_type;
	u32 mem_type;
	bool with_ttm;
	bool no_gpu_wait;
};

static struct ttm_placement *ttm_placement_kunit_init(struct kunit *test,
						       struct ttm_place *places,
						       unsigned int num_places)
{
	struct ttm_placement *placement;

	placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, placement);

	placement->num_placement = num_places;
	placement->placement = places;

	return placement;
}

static const char *fence_name(struct dma_fence *f)
{
	return "ttm-bo-validate-fence";
}

static const struct dma_fence_ops fence_ops = {
	.get_driver_name = fence_name,
	.get_timeline_name = fence_name,
};

static struct dma_fence *alloc_mock_fence(struct kunit *test)
{
	struct dma_fence *fence;

	fence = kunit_kzalloc(test, sizeof(*fence), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, fence);

	dma_fence_init(fence, &fence_ops, &fence_lock, 0, 0);

	return fence;
}

static void dma_resv_kunit_active_fence_init(struct kunit *test,
					     struct dma_resv *resv,
					     enum dma_resv_usage usage)
{
	struct dma_fence *fence;

	fence = alloc_mock_fence(test);
	dma_fence_enable_sw_signaling(fence);

	dma_resv_lock(resv, NULL);
	dma_resv_reserve_fences(resv, 1);
	dma_resv_add_fence(resv, fence, usage);
	dma_resv_unlock(resv);
}

static void ttm_bo_validate_case_desc(const struct ttm_bo_validate_test_case *t,
				      char *desc)
{
	strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
}

static const struct ttm_bo_validate_test_case ttm_bo_type_cases[] = {
	{
		.description = "Buffer object for userspace",
		.bo_type = ttm_bo_type_device,
	},
	{
		.description = "Kernel buffer object",
		.bo_type = ttm_bo_type_kernel,
	},
	{
		.description = "Shared buffer object",
		.bo_type = ttm_bo_type_sg,
	},
};

KUNIT_ARRAY_PARAM(ttm_bo_types, ttm_bo_type_cases,
		  ttm_bo_validate_case_desc);

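/*
 * Initialise a reserved BO in the system domain and check every field that
 * ttm_bo_init_reserved() is expected to set up.
 */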
static void ttm_bo_init_reserved_sys_man(struct kunit *test)
{
	const struct ttm_bo_validate_test_case *params = test->param_value;
	struct ttm_test_devices *priv = test->priv;
	enum ttm_bo_type bo_type = params->bo_type;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_operation_ctx ctx = { };
	struct ttm_placement *placement;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	int err;

	bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	place = ttm_place_kunit_init(test, TTM_PL_SYSTEM, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	drm_gem_private_object_init(priv->drm, &bo->base, size);

	err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement,
				   PAGE_SIZE, &ctx, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	dma_resv_unlock(bo->base.resv);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, kref_read(&bo->kref), 1);
	KUNIT_EXPECT_PTR_EQ(test, bo->bdev, priv->ttm_dev);
	KUNIT_EXPECT_EQ(test, bo->type, bo_type);
	KUNIT_EXPECT_EQ(test, bo->page_alignment, PAGE_SIZE);
	KUNIT_EXPECT_PTR_EQ(test, bo->destroy, &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, bo->pin_count, 0);
	KUNIT_EXPECT_NULL(test, bo->bulk_move);
	KUNIT_EXPECT_NOT_NULL(test, bo->ttm);
	KUNIT_EXPECT_FALSE(test, ttm_tt_is_populated(bo->ttm));
	KUNIT_EXPECT_NOT_NULL(test, (void *)bo->base.resv->fences);
	KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size);

	if (bo_type != ttm_bo_type_kernel)
		KUNIT_EXPECT_TRUE(test,
				  drm_mm_node_allocated(&bo->base.vma_node.vm_node));

	ttm_resource_free(bo, &bo->resource);
	ttm_bo_put(bo);
}

static void ttm_bo_init_reserved_mock_man(struct kunit *test)
{
	const struct ttm_bo_validate_test_case *params = test->param_value;
	enum ttm_bo_type bo_type = params->bo_type;
	struct ttm_test_devices *priv = test->priv;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_operation_ctx ctx = { };
	struct ttm_placement *placement;
	u32 mem_type = TTM_PL_VRAM;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	int err;

	ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);

	bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	drm_gem_private_object_init(priv->drm, &bo->base, size);

	err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement,
				   PAGE_SIZE, &ctx, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	dma_resv_unlock(bo->base.resv);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, kref_read(&bo->kref), 1);
	KUNIT_EXPECT_PTR_EQ(test, bo->bdev, priv->ttm_dev);
	KUNIT_EXPECT_EQ(test, bo->type, bo_type);
	KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size);

	if (bo_type != ttm_bo_type_kernel)
		KUNIT_EXPECT_TRUE(test,
				  drm_mm_node_allocated(&bo->base.vma_node.vm_node));

	ttm_resource_free(bo, &bo->resource);
	ttm_bo_put(bo);
	ttm_mock_manager_fini(priv->ttm_dev, mem_type);
}

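/*
 * A reservation object passed to ttm_bo_init_reserved() must be used by the
 * BO instead of its embedded one.
 */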
static void ttm_bo_init_reserved_resv(struct kunit *test)
{
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_test_devices *priv = test->priv;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_operation_ctx ctx = { };
	struct ttm_placement *placement;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	struct dma_resv resv;
	int err;

	bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	place = ttm_place_kunit_init(test, TTM_PL_SYSTEM, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	drm_gem_private_object_init(priv->drm, &bo->base, size);
	dma_resv_init(&resv);
	dma_resv_lock(&resv, NULL);

	err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement,
				   PAGE_SIZE, &ctx, NULL, &resv,
				   &dummy_ttm_bo_destroy);
	dma_resv_unlock(bo->base.resv);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_PTR_EQ(test, bo->base.resv, &resv);

	ttm_resource_free(bo, &bo->resource);
	ttm_bo_put(bo);
}

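/*
 * Move a BO from the system domain to the mock VRAM manager and check that
 * the placement flags are propagated to the new resource.
 */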
static void ttm_bo_validate_basic(struct kunit *test)
{
	const struct ttm_bo_validate_test_case *params = test->param_value;
	u32 fst_mem = TTM_PL_SYSTEM, snd_mem = TTM_PL_VRAM;
	struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
	struct ttm_placement *fst_placement, *snd_placement;
	struct ttm_test_devices *priv = test->priv;
	struct ttm_place *fst_place, *snd_place;
	u32 size = ALIGN(SZ_8K, PAGE_SIZE);
	struct ttm_buffer_object *bo;
	int err;

	ttm_mock_manager_init(priv->ttm_dev, snd_mem, MANAGER_SIZE);

	fst_place = ttm_place_kunit_init(test, fst_mem, 0);
	fst_placement = ttm_placement_kunit_init(test, fst_place, 1);

	bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	drm_gem_private_object_init(priv->drm, &bo->base, size);

	err = ttm_bo_init_reserved(priv->ttm_dev, bo, params->bo_type,
				   fst_placement, PAGE_SIZE, &ctx_init, NULL,
				   NULL, &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);

	snd_place = ttm_place_kunit_init(test, snd_mem, DRM_BUDDY_TOPDOWN_ALLOCATION);
	snd_placement = ttm_placement_kunit_init(test, snd_place, 1);

	err = ttm_bo_validate(bo, snd_placement, &ctx_val);
	dma_resv_unlock(bo->base.resv);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, bo->base.size);
	KUNIT_EXPECT_NOT_NULL(test, bo->ttm);
	KUNIT_EXPECT_TRUE(test, ttm_tt_is_populated(bo->ttm));
	KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem);
	KUNIT_EXPECT_EQ(test, bo->resource->placement,
			DRM_BUDDY_TOPDOWN_ALLOCATION);

	ttm_bo_put(bo);
	ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
}

static void ttm_bo_validate_invalid_placement(struct kunit *test)
{
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	u32 unknown_mem_type = TTM_PL_PRIV + 1;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_operation_ctx ctx = { };
	struct ttm_placement *placement;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	int err;

	place = ttm_place_kunit_init(test, unknown_mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
	bo->type = bo_type;

	ttm_bo_reserve(bo, false, false, NULL);
	err = ttm_bo_validate(bo, placement, &ctx);
	dma_resv_unlock(bo->base.resv);

	KUNIT_EXPECT_EQ(test, err, -ENOMEM);

	ttm_bo_put(bo);
}

static void ttm_bo_validate_failed_alloc(struct kunit *test)
{
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_test_devices *priv = test->priv;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_operation_ctx ctx = { };
	struct ttm_placement *placement;
	u32 mem_type = TTM_PL_VRAM;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	int err;

	bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
	bo->type = bo_type;

	ttm_bad_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	ttm_bo_reserve(bo, false, false, NULL);
	err = ttm_bo_validate(bo, placement, &ctx);
	dma_resv_unlock(bo->base.resv);

	KUNIT_EXPECT_EQ(test, err, -ENOMEM);

	ttm_bo_put(bo);
	ttm_bad_manager_fini(priv->ttm_dev, mem_type);
}

static void ttm_bo_validate_pinned(struct kunit *test)
{
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_operation_ctx ctx = { };
	u32 mem_type = TTM_PL_SYSTEM;
	struct ttm_placement *placement;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	int err;

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
	bo->type = bo_type;

	ttm_bo_reserve(bo, false, false, NULL);
	ttm_bo_pin(bo);
	err = ttm_bo_validate(bo, placement, &ctx);
	dma_resv_unlock(bo->base.resv);

	KUNIT_EXPECT_EQ(test, err, -EINVAL);

	ttm_bo_reserve(bo, false, false, NULL);
	ttm_bo_unpin(bo);
	dma_resv_unlock(bo->base.resv);

	ttm_bo_put(bo);
}

static const struct ttm_bo_validate_test_case ttm_mem_type_cases[] = {
	{
		.description = "System manager",
		.mem_type = TTM_PL_SYSTEM,
	},
	{
		.description = "VRAM manager",
		.mem_type = TTM_PL_VRAM,
	},
};

KUNIT_ARRAY_PARAM(ttm_bo_validate_mem, ttm_mem_type_cases,
		  ttm_bo_validate_case_desc);

static void ttm_bo_validate_same_placement(struct kunit *test)
{
	const struct ttm_bo_validate_test_case *params = test->param_value;
	struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
	struct ttm_test_devices *priv = test->priv;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_placement *placement;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	int err;

	place = ttm_place_kunit_init(test, params->mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	if (params->mem_type != TTM_PL_SYSTEM)
		ttm_mock_manager_init(priv->ttm_dev, params->mem_type, MANAGER_SIZE);

	bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	drm_gem_private_object_init(priv->drm, &bo->base, size);

	err = ttm_bo_init_reserved(priv->ttm_dev, bo, params->bo_type,
				   placement, PAGE_SIZE, &ctx_init, NULL,
				   NULL, &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);

	err = ttm_bo_validate(bo, placement, &ctx_val);
	dma_resv_unlock(bo->base.resv);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, 0);

	ttm_bo_put(bo);

	if (params->mem_type != TTM_PL_SYSTEM)
		ttm_mock_manager_fini(priv->ttm_dev, params->mem_type);
}

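/*
 * The desired placement is backed by a manager whose allocations always fail,
 * so validation must fall back to the second placement and put the BO on that
 * manager's LRU list.
 */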
412 static void ttm_bo_validate_busy_placement(struct kunit *test)
414 u32 fst_mem = TTM_PL_VRAM, snd_mem = TTM_PL_VRAM + 1;
415 struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
416 struct ttm_placement *placement_init, *placement_val;
417 enum ttm_bo_type bo_type = ttm_bo_type_device;
418 struct ttm_test_devices *priv = test->priv;
419 u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
420 struct ttm_place *init_place, places[2];
421 struct ttm_resource_manager *man;
422 struct ttm_buffer_object *bo;
425 ttm_bad_manager_init(priv->ttm_dev, fst_mem, MANAGER_SIZE);
426 ttm_mock_manager_init(priv->ttm_dev, snd_mem, MANAGER_SIZE);
428 init_place = ttm_place_kunit_init(test, TTM_PL_SYSTEM, 0);
429 placement_init = ttm_placement_kunit_init(test, init_place, 1);
431 bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
432 KUNIT_ASSERT_NOT_NULL(test, bo);
434 drm_gem_private_object_init(priv->drm, &bo->base, size);
436 err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement_init,
437 PAGE_SIZE, &ctx_init, NULL, NULL,
438 &dummy_ttm_bo_destroy);
439 KUNIT_EXPECT_EQ(test, err, 0);
441 places[0] = (struct ttm_place){ .mem_type = fst_mem, .flags = TTM_PL_FLAG_DESIRED };
442 places[1] = (struct ttm_place){ .mem_type = snd_mem, .flags = TTM_PL_FLAG_FALLBACK };
443 placement_val = ttm_placement_kunit_init(test, places, 2);
445 err = ttm_bo_validate(bo, placement_val, &ctx_val);
446 dma_resv_unlock(bo->base.resv);
448 man = ttm_manager_type(priv->ttm_dev, snd_mem);
450 KUNIT_EXPECT_EQ(test, err, 0);
451 KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, bo->base.size);
452 KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem);
453 KUNIT_ASSERT_TRUE(test, list_is_singular(&man->lru[bo->priority]));
456 ttm_bad_manager_fini(priv->ttm_dev, fst_mem);
457 ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
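/*
 * Moving from VRAM to the system domain needs a temporary hop through TT,
 * so bytes_moved accounts for the buffer twice.
 */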
static void ttm_bo_validate_multihop(struct kunit *test)
{
	const struct ttm_bo_validate_test_case *params = test->param_value;
	struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
	struct ttm_placement *placement_init, *placement_val;
	u32 fst_mem = TTM_PL_VRAM, tmp_mem = TTM_PL_TT, final_mem = TTM_PL_SYSTEM;
	struct ttm_test_devices *priv = test->priv;
	struct ttm_place *fst_place, *final_place;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_buffer_object *bo;
	int err;

	ttm_mock_manager_init(priv->ttm_dev, fst_mem, MANAGER_SIZE);
	ttm_mock_manager_init(priv->ttm_dev, tmp_mem, MANAGER_SIZE);

	fst_place = ttm_place_kunit_init(test, fst_mem, 0);
	placement_init = ttm_placement_kunit_init(test, fst_place, 1);

	bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	drm_gem_private_object_init(priv->drm, &bo->base, size);

	err = ttm_bo_init_reserved(priv->ttm_dev, bo, params->bo_type,
				   placement_init, PAGE_SIZE, &ctx_init, NULL,
				   NULL, &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);

	final_place = ttm_place_kunit_init(test, final_mem, 0);
	placement_val = ttm_placement_kunit_init(test, final_place, 1);

	err = ttm_bo_validate(bo, placement_val, &ctx_val);
	dma_resv_unlock(bo->base.resv);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2);
	KUNIT_EXPECT_EQ(test, bo->resource->mem_type, final_mem);

	ttm_bo_put(bo);

	ttm_mock_manager_fini(priv->ttm_dev, fst_mem);
	ttm_mock_manager_fini(priv->ttm_dev, tmp_mem);
}

static const struct ttm_bo_validate_test_case ttm_bo_no_placement_cases[] = {
	{
		.description = "Buffer object in system domain, no page vector",
	},
	{
		.description = "Buffer object in system domain with an existing page vector",
		.with_ttm = true,
	},
};

KUNIT_ARRAY_PARAM(ttm_bo_no_placement, ttm_bo_no_placement_cases,
		  ttm_bo_validate_case_desc);

static void ttm_bo_validate_no_placement_signaled(struct kunit *test)
{
	const struct ttm_bo_validate_test_case *params = test->param_value;
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_test_devices *priv = test->priv;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_operation_ctx ctx = { };
	u32 mem_type = TTM_PL_SYSTEM;
	struct ttm_resource_manager *man;
	struct ttm_placement *placement;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	struct ttm_tt *old_tt;
	u32 flags;
	int err;

	place = ttm_place_kunit_init(test, mem_type, 0);
	man = ttm_manager_type(priv->ttm_dev, mem_type);

	bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
	bo->type = bo_type;

	if (params->with_ttm) {
		old_tt = priv->ttm_dev->funcs->ttm_tt_create(bo, 0);
		ttm_pool_alloc(&priv->ttm_dev->pool, old_tt, &ctx);
		bo->ttm = old_tt;
	}

	err = ttm_resource_alloc(bo, place, &bo->resource);
	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_ASSERT_EQ(test, man->usage, size);

	placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, placement);

	ttm_bo_reserve(bo, false, false, NULL);
	err = ttm_bo_validate(bo, placement, &ctx);
	ttm_bo_unreserve(bo);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_ASSERT_EQ(test, man->usage, 0);
	KUNIT_ASSERT_NOT_NULL(test, bo->ttm);
	KUNIT_EXPECT_EQ(test, ctx.bytes_moved, 0);

	if (params->with_ttm) {
		flags = bo->ttm->page_flags;

		KUNIT_ASSERT_PTR_EQ(test, bo->ttm, old_tt);
		KUNIT_ASSERT_FALSE(test, flags & TTM_TT_FLAG_PRIV_POPULATED);
		KUNIT_ASSERT_TRUE(test, flags & TTM_TT_FLAG_ZERO_ALLOC);
	}

	ttm_bo_put(bo);
}

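/*
 * Kernel thread that signals every bookkeep fence on the BO's reservation
 * object, unblocking a validate call that waits for the BO to idle.
 */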
static int threaded_dma_resv_signal(void *arg)
{
	struct ttm_buffer_object *bo = arg;
	struct dma_resv *resv = bo->base.resv;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_fence_signal(fence);
	}
	dma_resv_iter_end(&cursor);

	return 0;
}

static void ttm_bo_validate_no_placement_not_signaled(struct kunit *test)
{
	const struct ttm_bo_validate_test_case *params = test->param_value;
	enum dma_resv_usage usage = DMA_RESV_USAGE_BOOKKEEP;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_operation_ctx ctx = { };
	u32 mem_type = TTM_PL_SYSTEM;
	struct ttm_placement *placement;
	struct ttm_buffer_object *bo;
	struct task_struct *task;
	struct ttm_place *place;
	int err;

	place = ttm_place_kunit_init(test, mem_type, 0);

	bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
	bo->type = params->bo_type;

	err = ttm_resource_alloc(bo, place, &bo->resource);
	KUNIT_EXPECT_EQ(test, err, 0);

	placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, placement);

	/* Create an active fence to simulate a non-idle resv object */
	spin_lock_init(&fence_lock);
	dma_resv_kunit_active_fence_init(test, bo->base.resv, usage);

	task = kthread_create(threaded_dma_resv_signal, bo, "dma-resv-signal");
	if (IS_ERR(task))
		KUNIT_FAIL(test, "Couldn't create dma resv signal task\n");

	wake_up_process(task);
	ttm_bo_reserve(bo, false, false, NULL);
	err = ttm_bo_validate(bo, placement, &ctx);
	ttm_bo_unreserve(bo);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_ASSERT_NOT_NULL(test, bo->ttm);
	KUNIT_ASSERT_NULL(test, bo->resource);
	KUNIT_ASSERT_NULL(test, bo->bulk_move);
	KUNIT_EXPECT_EQ(test, ctx.bytes_moved, 0);

	if (bo->type != ttm_bo_type_sg)
		KUNIT_ASSERT_PTR_EQ(test, bo->base.resv, &bo->base._resv);

	/* Make sure we have an idle object at this point */
	dma_resv_wait_timeout(bo->base.resv, usage, false, MAX_SCHEDULE_TIMEOUT);

	ttm_bo_put(bo);
}

static void ttm_bo_validate_move_fence_signaled(struct kunit *test)
{
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_test_devices *priv = test->priv;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_operation_ctx ctx = { };
	u32 mem_type = TTM_PL_SYSTEM;
	struct ttm_resource_manager *man;
	struct ttm_placement *placement;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	int err;

	man = ttm_manager_type(priv->ttm_dev, mem_type);
	man->move = dma_fence_get_stub();

	bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
	bo->type = bo_type;

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	ttm_bo_reserve(bo, false, false, NULL);
	err = ttm_bo_validate(bo, placement, &ctx);
	ttm_bo_unreserve(bo);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type);
	KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size);

	ttm_bo_put(bo);
	dma_fence_put(man->move);
}

static const struct ttm_bo_validate_test_case ttm_bo_validate_wait_cases[] = {
	{
		.description = "Waits for GPU",
		.no_gpu_wait = false,
	},
	{
		.description = "Tries to lock straight away",
		.no_gpu_wait = true,
	},
};

KUNIT_ARRAY_PARAM(ttm_bo_validate_wait, ttm_bo_validate_wait_cases,
		  ttm_bo_validate_case_desc);

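/*
 * Helper thread that signals the manager's move fence after a short delay, so
 * the validate call below has something to wait on.
 */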
static int threaded_fence_signal(void *arg)
{
	struct dma_fence *fence = arg;

	msleep(20); /* delay value assumed; the original was lost in extraction */

	return dma_fence_signal(fence);
}

static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test)
{
	const struct ttm_bo_validate_test_case *params = test->param_value;
	struct ttm_operation_ctx ctx_init = { },
				 ctx_val = { .no_wait_gpu = params->no_gpu_wait };
	u32 fst_mem = TTM_PL_VRAM, snd_mem = TTM_PL_VRAM + 1;
	struct ttm_placement *placement_init, *placement_val;
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_test_devices *priv = test->priv;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_place *init_place, places[2];
	struct ttm_resource_manager *man;
	struct ttm_buffer_object *bo;
	struct task_struct *task;
	int err;

	init_place = ttm_place_kunit_init(test, TTM_PL_SYSTEM, 0);
	placement_init = ttm_placement_kunit_init(test, init_place, 1);

	bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	drm_gem_private_object_init(priv->drm, &bo->base, size);

	err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement_init,
				   PAGE_SIZE, &ctx_init, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);

	ttm_mock_manager_init(priv->ttm_dev, fst_mem, MANAGER_SIZE);
	ttm_mock_manager_init(priv->ttm_dev, snd_mem, MANAGER_SIZE);

	places[0] = (struct ttm_place){ .mem_type = fst_mem, .flags = TTM_PL_FLAG_DESIRED };
	places[1] = (struct ttm_place){ .mem_type = snd_mem, .flags = TTM_PL_FLAG_FALLBACK };
	placement_val = ttm_placement_kunit_init(test, places, 2);

	spin_lock_init(&fence_lock);
	man = ttm_manager_type(priv->ttm_dev, fst_mem);
	man->move = alloc_mock_fence(test);

	task = kthread_create(threaded_fence_signal, man->move, "move-fence-signal");
	if (IS_ERR(task))
		KUNIT_FAIL(test, "Couldn't create move fence signal task\n");

	wake_up_process(task);
	err = ttm_bo_validate(bo, placement_val, &ctx_val);
	dma_resv_unlock(bo->base.resv);

	dma_fence_wait_timeout(man->move, false, MAX_SCHEDULE_TIMEOUT);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size);

	if (params->no_gpu_wait)
		KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem);
	else
		KUNIT_EXPECT_EQ(test, bo->resource->mem_type, fst_mem);

	ttm_bo_put(bo);
	ttm_mock_manager_fini(priv->ttm_dev, fst_mem);
	ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
}

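/*
 * bo_small is created in TT first; validating bo_big, sized at roughly half
 * of system RAM, into the same manager forces bo_small out to the system
 * domain and swaps its pages out.
 */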
static void ttm_bo_validate_swapout(struct kunit *test)
{
	unsigned long size_big, size = ALIGN(BO_SIZE, PAGE_SIZE);
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_buffer_object *bo_small, *bo_big;
	struct ttm_test_devices *priv = test->priv;
	struct ttm_operation_ctx ctx = { };
	struct ttm_placement *placement;
	u32 mem_type = TTM_PL_TT;
	struct ttm_place *place;
	struct sysinfo si;
	int err;

	si_meminfo(&si);
	size_big = ALIGN(((u64)si.totalram * si.mem_unit / 2), PAGE_SIZE);

	ttm_mock_manager_init(priv->ttm_dev, mem_type, size_big + size);

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	bo_small = kunit_kzalloc(test, sizeof(*bo_small), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo_small);

	drm_gem_private_object_init(priv->drm, &bo_small->base, size);

	err = ttm_bo_init_reserved(priv->ttm_dev, bo_small, bo_type, placement,
				   PAGE_SIZE, &ctx, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);
	dma_resv_unlock(bo_small->base.resv);

	bo_big = ttm_bo_kunit_init(test, priv, size_big, NULL);

	dma_resv_lock(bo_big->base.resv, NULL);
	err = ttm_bo_validate(bo_big, placement, &ctx);
	dma_resv_unlock(bo_big->base.resv);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_NOT_NULL(test, bo_big->resource);
	KUNIT_EXPECT_EQ(test, bo_big->resource->mem_type, mem_type);
	KUNIT_EXPECT_EQ(test, bo_small->resource->mem_type, TTM_PL_SYSTEM);
	KUNIT_EXPECT_TRUE(test, bo_small->ttm->page_flags & TTM_TT_FLAG_SWAPPED);

	ttm_bo_put(bo_big);
	ttm_bo_put(bo_small);

	ttm_mock_manager_fini(priv->ttm_dev, mem_type);
}

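/*
 * Three BOs fill the mock VRAM manager; validating one more must evict the
 * first (least recently used) one to the system domain via a hop through TT.
 */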
static void ttm_bo_validate_happy_evict(struct kunit *test)
{
	u32 mem_type = TTM_PL_VRAM, mem_multihop = TTM_PL_TT,
	    mem_type_evict = TTM_PL_SYSTEM;
	struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	u32 small = SZ_8K, medium = SZ_512K,
	    big = MANAGER_SIZE - (small + medium);
	u32 bo_sizes[] = { small, medium, big };
	struct ttm_test_devices *priv = test->priv;
	struct ttm_buffer_object *bos, *bo_val;
	struct ttm_placement *placement;
	struct ttm_place *place;
	u32 bo_no = 3;
	int i, err;

	ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
	ttm_mock_manager_init(priv->ttm_dev, mem_multihop, MANAGER_SIZE);

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	bos = kunit_kmalloc_array(test, bo_no, sizeof(*bos), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bos);

	memset(bos, 0, sizeof(*bos) * bo_no);
	for (i = 0; i < bo_no; i++) {
		drm_gem_private_object_init(priv->drm, &bos[i].base, bo_sizes[i]);
		err = ttm_bo_init_reserved(priv->ttm_dev, &bos[i], bo_type, placement,
					   PAGE_SIZE, &ctx_init, NULL, NULL,
					   &dummy_ttm_bo_destroy);
		dma_resv_unlock(bos[i].base.resv);
	}

	bo_val = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
	bo_val->type = bo_type;

	ttm_bo_reserve(bo_val, false, false, NULL);
	err = ttm_bo_validate(bo_val, placement, &ctx_val);
	ttm_bo_unreserve(bo_val);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, bos[0].resource->mem_type, mem_type_evict);
	KUNIT_EXPECT_TRUE(test, bos[0].ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC);
	KUNIT_EXPECT_TRUE(test, bos[0].ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);
	KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, small * 2 + BO_SIZE);
	KUNIT_EXPECT_EQ(test, bos[1].resource->mem_type, mem_type);

	for (i = 0; i < bo_no; i++)
		ttm_bo_put(&bos[i]);
	ttm_bo_put(bo_val);

	ttm_mock_manager_fini(priv->ttm_dev, mem_type);
	ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
}

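/*
 * With the only resident BO pinned there is nothing that can be evicted,
 * so validating a second BO must fail with -ENOMEM.
 */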
static void ttm_bo_validate_all_pinned_evict(struct kunit *test)
{
	struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_buffer_object *bo_big, *bo_small;
	struct ttm_test_devices *priv = test->priv;
	struct ttm_placement *placement;
	u32 mem_type = TTM_PL_VRAM, mem_multihop = TTM_PL_TT;
	struct ttm_place *place;
	int err;

	ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
	ttm_mock_manager_init(priv->ttm_dev, mem_multihop, MANAGER_SIZE);

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	bo_big = kunit_kzalloc(test, sizeof(*bo_big), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo_big);

	drm_gem_private_object_init(priv->drm, &bo_big->base, MANAGER_SIZE);
	err = ttm_bo_init_reserved(priv->ttm_dev, bo_big, bo_type, placement,
				   PAGE_SIZE, &ctx_init, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);

	ttm_bo_pin(bo_big);
	dma_resv_unlock(bo_big->base.resv);

	bo_small = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
	bo_small->type = bo_type;

	ttm_bo_reserve(bo_small, false, false, NULL);
	err = ttm_bo_validate(bo_small, placement, &ctx_val);
	ttm_bo_unreserve(bo_small);

	KUNIT_EXPECT_EQ(test, err, -ENOMEM);

	ttm_bo_put(bo_small);

	ttm_bo_reserve(bo_big, false, false, NULL);
	ttm_bo_unpin(bo_big);
	dma_resv_unlock(bo_big->base.resv);
	ttm_bo_put(bo_big);

	ttm_mock_manager_fini(priv->ttm_dev, mem_type);
	ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
}

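/*
 * Two buffers fill the VRAM manager, but only the unpinned one may be evicted
 * to make room for the new BO; the pinned one has to stay put.
 */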
static void ttm_bo_validate_allowed_only_evict(struct kunit *test)
{
	u32 mem_type = TTM_PL_VRAM, mem_multihop = TTM_PL_TT,
	    mem_type_evict = TTM_PL_SYSTEM;
	struct ttm_buffer_object *bo, *bo_evictable, *bo_pinned;
	struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_test_devices *priv = test->priv;
	struct ttm_placement *placement;
	struct ttm_place *place;
	u32 size = SZ_512K; /* assumed; two BOs of this size exactly fill MANAGER_SIZE */
	int err;

	ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
	ttm_mock_manager_init(priv->ttm_dev, mem_multihop, MANAGER_SIZE);

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	bo_pinned = kunit_kzalloc(test, sizeof(*bo_pinned), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo_pinned);

	drm_gem_private_object_init(priv->drm, &bo_pinned->base, size);
	err = ttm_bo_init_reserved(priv->ttm_dev, bo_pinned, bo_type, placement,
				   PAGE_SIZE, &ctx_init, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);
	ttm_bo_pin(bo_pinned);
	dma_resv_unlock(bo_pinned->base.resv);

	bo_evictable = kunit_kzalloc(test, sizeof(*bo_evictable), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo_evictable);

	drm_gem_private_object_init(priv->drm, &bo_evictable->base, size);
	err = ttm_bo_init_reserved(priv->ttm_dev, bo_evictable, bo_type, placement,
				   PAGE_SIZE, &ctx_init, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);
	dma_resv_unlock(bo_evictable->base.resv);

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
	bo->type = bo_type;

	ttm_bo_reserve(bo, false, false, NULL);
	err = ttm_bo_validate(bo, placement, &ctx_val);
	ttm_bo_unreserve(bo);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type);
	KUNIT_EXPECT_EQ(test, bo_pinned->resource->mem_type, mem_type);
	KUNIT_EXPECT_EQ(test, bo_evictable->resource->mem_type, mem_type_evict);
	KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2 + BO_SIZE);

	ttm_bo_put(bo);
	ttm_bo_put(bo_evictable);

	ttm_bo_reserve(bo_pinned, false, false, NULL);
	ttm_bo_unpin(bo_pinned);
	dma_resv_unlock(bo_pinned->base.resv);
	ttm_bo_put(bo_pinned);

	ttm_mock_manager_fini(priv->ttm_dev, mem_type);
	ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
}

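/*
 * A BO marked as deleted still holds its resource; evicting it must release
 * the resource and the page vector instead of moving the data.
 */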
static void ttm_bo_validate_deleted_evict(struct kunit *test)
{
	struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
	u32 small = SZ_8K, big = MANAGER_SIZE - BO_SIZE;
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_buffer_object *bo_big, *bo_small;
	struct ttm_test_devices *priv = test->priv;
	struct ttm_resource_manager *man;
	u32 mem_type = TTM_PL_VRAM;
	struct ttm_placement *placement;
	struct ttm_place *place;
	int err;

	ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
	man = ttm_manager_type(priv->ttm_dev, mem_type);

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	bo_big = kunit_kzalloc(test, sizeof(*bo_big), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo_big);

	drm_gem_private_object_init(priv->drm, &bo_big->base, big);
	err = ttm_bo_init_reserved(priv->ttm_dev, bo_big, bo_type, placement,
				   PAGE_SIZE, &ctx_init, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, ttm_resource_manager_usage(man), big);

	dma_resv_unlock(bo_big->base.resv);
	bo_big->deleted = true;

	bo_small = ttm_bo_kunit_init(test, test->priv, small, NULL);
	bo_small->type = bo_type;

	ttm_bo_reserve(bo_small, false, false, NULL);
	err = ttm_bo_validate(bo_small, placement, &ctx_val);
	ttm_bo_unreserve(bo_small);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, bo_small->resource->mem_type, mem_type);
	KUNIT_EXPECT_EQ(test, ttm_resource_manager_usage(man), small);
	KUNIT_EXPECT_NULL(test, bo_big->ttm);
	KUNIT_EXPECT_NULL(test, bo_big->resource);

	ttm_bo_put(bo_small);

	ttm_mock_manager_fini(priv->ttm_dev, mem_type);
}

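/*
 * The only domain the resident BO could be evicted to is permanently busy,
 * so there is no way to make room and validation must fail.
 */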
static void ttm_bo_validate_busy_domain_evict(struct kunit *test)
{
	u32 mem_type = TTM_PL_VRAM, mem_type_evict = TTM_PL_MOCK1;
	struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_test_devices *priv = test->priv;
	struct ttm_buffer_object *bo_init, *bo_val;
	struct ttm_placement *placement;
	struct ttm_place *place;
	int err;

	/*
	 * Drop the default device and set up a new one that points to a busy,
	 * and thus unsuitable, eviction domain.
	 */
	ttm_device_fini(priv->ttm_dev);

	err = ttm_device_kunit_init_bad_evict(test->priv, priv->ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);

	ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
	ttm_busy_manager_init(priv->ttm_dev, mem_type_evict, MANAGER_SIZE);

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	bo_init = kunit_kzalloc(test, sizeof(*bo_init), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo_init);

	drm_gem_private_object_init(priv->drm, &bo_init->base, MANAGER_SIZE);
	err = ttm_bo_init_reserved(priv->ttm_dev, bo_init, bo_type, placement,
				   PAGE_SIZE, &ctx_init, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);
	dma_resv_unlock(bo_init->base.resv);

	bo_val = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
	bo_val->type = bo_type;

	ttm_bo_reserve(bo_val, false, false, NULL);
	err = ttm_bo_validate(bo_val, placement, &ctx_val);
	ttm_bo_unreserve(bo_val);

	KUNIT_EXPECT_EQ(test, err, -ENOMEM);
	KUNIT_EXPECT_EQ(test, bo_init->resource->mem_type, mem_type);
	KUNIT_EXPECT_NULL(test, bo_val->resource);

	ttm_bo_put(bo_init);
	ttm_bo_put(bo_val);

	ttm_mock_manager_fini(priv->ttm_dev, mem_type);
	ttm_bad_manager_fini(priv->ttm_dev, mem_type_evict);
}

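/*
 * Evicting the resident BO here ends in gutting: it loses its resource and
 * its page vector is flagged as zero-allocated.
 */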
static void ttm_bo_validate_evict_gutting(struct kunit *test)
{
	struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_test_devices *priv = test->priv;
	struct ttm_buffer_object *bo, *bo_evict;
	u32 mem_type = TTM_PL_MOCK1;
	struct ttm_placement *placement;
	struct ttm_place *place;
	int err;

	ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	bo_evict = kunit_kzalloc(test, sizeof(*bo_evict), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo_evict);

	drm_gem_private_object_init(priv->drm, &bo_evict->base, MANAGER_SIZE);
	err = ttm_bo_init_reserved(priv->ttm_dev, bo_evict, bo_type, placement,
				   PAGE_SIZE, &ctx_init, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);
	dma_resv_unlock(bo_evict->base.resv);

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
	bo->type = bo_type;

	ttm_bo_reserve(bo, false, false, NULL);
	err = ttm_bo_validate(bo, placement, &ctx_val);
	ttm_bo_unreserve(bo);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type);
	KUNIT_ASSERT_NULL(test, bo_evict->resource);
	KUNIT_ASSERT_TRUE(test, bo_evict->ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC);

	ttm_bo_put(bo_evict);
	ttm_bo_put(bo);

	ttm_mock_manager_fini(priv->ttm_dev, mem_type);
}

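/*
 * Exercise recursive eviction: making room for bo_val in TT requires evicting
 * bo_tt, which in turn needs space in the domain already filled by bo_mock.
 */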
static void ttm_bo_validate_recursive_evict(struct kunit *test)
{
	u32 mem_type = TTM_PL_TT, mem_type_evict = TTM_PL_MOCK2;
	struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
	struct ttm_placement *placement_tt, *placement_mock;
	struct ttm_buffer_object *bo_tt, *bo_mock, *bo_val;
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_test_devices *priv = test->priv;
	struct ttm_place *place_tt, *place_mock;
	int err;

	ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
	ttm_mock_manager_init(priv->ttm_dev, mem_type_evict, MANAGER_SIZE);

	place_tt = ttm_place_kunit_init(test, mem_type, 0);
	place_mock = ttm_place_kunit_init(test, mem_type_evict, 0);

	placement_tt = ttm_placement_kunit_init(test, place_tt, 1);
	placement_mock = ttm_placement_kunit_init(test, place_mock, 1);

	bo_tt = kunit_kzalloc(test, sizeof(*bo_tt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo_tt);

	bo_mock = kunit_kzalloc(test, sizeof(*bo_mock), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo_mock);

	drm_gem_private_object_init(priv->drm, &bo_tt->base, MANAGER_SIZE);
	err = ttm_bo_init_reserved(priv->ttm_dev, bo_tt, bo_type, placement_tt,
				   PAGE_SIZE, &ctx_init, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);
	dma_resv_unlock(bo_tt->base.resv);

	drm_gem_private_object_init(priv->drm, &bo_mock->base, MANAGER_SIZE);
	err = ttm_bo_init_reserved(priv->ttm_dev, bo_mock, bo_type, placement_mock,
				   PAGE_SIZE, &ctx_init, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);
	dma_resv_unlock(bo_mock->base.resv);

	bo_val = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
	bo_val->type = bo_type;

	ttm_bo_reserve(bo_val, false, false, NULL);
	err = ttm_bo_validate(bo_val, placement_tt, &ctx_val);
	ttm_bo_unreserve(bo_val);

	KUNIT_EXPECT_EQ(test, err, 0);

	ttm_mock_manager_fini(priv->ttm_dev, mem_type);
	ttm_mock_manager_fini(priv->ttm_dev, mem_type_evict);

	ttm_bo_put(bo_val);
	ttm_bo_put(bo_tt);
	ttm_bo_put(bo_mock);
}

static struct kunit_case ttm_bo_validate_test_cases[] = {
	KUNIT_CASE_PARAM(ttm_bo_init_reserved_sys_man, ttm_bo_types_gen_params),
	KUNIT_CASE_PARAM(ttm_bo_init_reserved_mock_man, ttm_bo_types_gen_params),
	KUNIT_CASE(ttm_bo_init_reserved_resv),
	KUNIT_CASE_PARAM(ttm_bo_validate_basic, ttm_bo_types_gen_params),
	KUNIT_CASE(ttm_bo_validate_invalid_placement),
	KUNIT_CASE_PARAM(ttm_bo_validate_same_placement,
			 ttm_bo_validate_mem_gen_params),
	KUNIT_CASE(ttm_bo_validate_failed_alloc),
	KUNIT_CASE(ttm_bo_validate_pinned),
	KUNIT_CASE(ttm_bo_validate_busy_placement),
	KUNIT_CASE_PARAM(ttm_bo_validate_multihop, ttm_bo_types_gen_params),
	KUNIT_CASE_PARAM(ttm_bo_validate_no_placement_signaled,
			 ttm_bo_no_placement_gen_params),
	KUNIT_CASE_PARAM(ttm_bo_validate_no_placement_not_signaled,
			 ttm_bo_types_gen_params),
	KUNIT_CASE(ttm_bo_validate_move_fence_signaled),
	KUNIT_CASE_PARAM(ttm_bo_validate_move_fence_not_signaled,
			 ttm_bo_validate_wait_gen_params),
	KUNIT_CASE(ttm_bo_validate_swapout),
	KUNIT_CASE(ttm_bo_validate_happy_evict),
	KUNIT_CASE(ttm_bo_validate_all_pinned_evict),
	KUNIT_CASE(ttm_bo_validate_allowed_only_evict),
	KUNIT_CASE(ttm_bo_validate_deleted_evict),
	KUNIT_CASE(ttm_bo_validate_busy_domain_evict),
	KUNIT_CASE(ttm_bo_validate_evict_gutting),
	KUNIT_CASE(ttm_bo_validate_recursive_evict),
	{}
};

static struct kunit_suite ttm_bo_validate_test_suite = {
	.name = "ttm_bo_validate",
	.init = ttm_test_devices_all_init,
	.exit = ttm_test_devices_fini,
	.test_cases = ttm_bo_validate_test_cases,
};

kunit_test_suites(&ttm_bo_validate_test_suite);

MODULE_DESCRIPTION("KUnit tests for ttm_bo APIs");
MODULE_LICENSE("GPL and additional rights");