// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
 * Copyright © 2023 Intel Corporation
 */
5 #include <linux/shmem_fs.h>
6 #include <drm/ttm/ttm_tt.h>
8 #include "ttm_kunit_helpers.h"
12 struct ttm_tt_test_case {
13 const char *description;
18 static const struct ttm_tt_test_case ttm_tt_init_basic_cases[] = {
20 .description = "Page-aligned size",
24 .description = "Extra pages requested",
30 static void ttm_tt_init_case_desc(const struct ttm_tt_test_case *t,
33 strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
36 KUNIT_ARRAY_PARAM(ttm_tt_init_basic, ttm_tt_init_basic_cases,
37 ttm_tt_init_case_desc);
39 static void ttm_tt_init_basic(struct kunit *test)
41 const struct ttm_tt_test_case *params = test->param_value;
42 struct ttm_buffer_object *bo;
44 u32 page_flags = TTM_TT_FLAG_ZERO_ALLOC;
45 enum ttm_caching caching = ttm_cached;
46 u32 extra_pages = params->extra_pages_num;
47 int num_pages = params->size >> PAGE_SHIFT;
50 tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
51 KUNIT_ASSERT_NOT_NULL(test, tt);
53 bo = ttm_bo_kunit_init(test, test->priv, params->size, NULL);
55 err = ttm_tt_init(tt, bo, page_flags, caching, extra_pages);
56 KUNIT_ASSERT_EQ(test, err, 0);
58 KUNIT_ASSERT_EQ(test, tt->num_pages, num_pages + extra_pages);
60 KUNIT_ASSERT_EQ(test, tt->page_flags, page_flags);
61 KUNIT_ASSERT_EQ(test, tt->caching, caching);
63 KUNIT_ASSERT_NULL(test, tt->dma_address);
64 KUNIT_ASSERT_NULL(test, tt->swap_storage);
67 static void ttm_tt_init_misaligned(struct kunit *test)
69 struct ttm_buffer_object *bo;
71 enum ttm_caching caching = ttm_cached;
73 int num_pages = (size + SZ_4K) >> PAGE_SHIFT;
76 tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
77 KUNIT_ASSERT_NOT_NULL(test, tt);
79 bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
81 /* Make the object size misaligned */
84 err = ttm_tt_init(tt, bo, 0, caching, 0);
85 KUNIT_ASSERT_EQ(test, err, 0);
87 KUNIT_ASSERT_EQ(test, tt->num_pages, num_pages);
90 static void ttm_tt_fini_basic(struct kunit *test)
92 struct ttm_buffer_object *bo;
94 enum ttm_caching caching = ttm_cached;
97 tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
98 KUNIT_ASSERT_NOT_NULL(test, tt);
100 bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
102 err = ttm_tt_init(tt, bo, 0, caching, 0);
103 KUNIT_ASSERT_EQ(test, err, 0);
104 KUNIT_ASSERT_NOT_NULL(test, tt->pages);
107 KUNIT_ASSERT_NULL(test, tt->pages);
110 static void ttm_tt_fini_sg(struct kunit *test)
112 struct ttm_buffer_object *bo;
114 enum ttm_caching caching = ttm_cached;
117 tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
118 KUNIT_ASSERT_NOT_NULL(test, tt);
120 bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
122 err = ttm_sg_tt_init(tt, bo, 0, caching);
123 KUNIT_ASSERT_EQ(test, err, 0);
124 KUNIT_ASSERT_NOT_NULL(test, tt->dma_address);
127 KUNIT_ASSERT_NULL(test, tt->dma_address);
130 static void ttm_tt_fini_shmem(struct kunit *test)
132 struct ttm_buffer_object *bo;
135 enum ttm_caching caching = ttm_cached;
138 tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
139 KUNIT_ASSERT_NOT_NULL(test, tt);
141 bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
143 err = ttm_tt_init(tt, bo, 0, caching, 0);
144 KUNIT_ASSERT_EQ(test, err, 0);
146 shmem = shmem_file_setup("ttm swap", BO_SIZE, 0);
147 tt->swap_storage = shmem;
150 KUNIT_ASSERT_NULL(test, tt->swap_storage);
153 static void ttm_tt_create_basic(struct kunit *test)
155 struct ttm_buffer_object *bo;
158 bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
159 bo->type = ttm_bo_type_device;
161 dma_resv_lock(bo->base.resv, NULL);
162 err = ttm_tt_create(bo, false);
163 dma_resv_unlock(bo->base.resv);
165 KUNIT_EXPECT_EQ(test, err, 0);
166 KUNIT_EXPECT_NOT_NULL(test, bo->ttm);
168 /* Free manually, as it was allocated outside of KUnit */
172 static void ttm_tt_create_invalid_bo_type(struct kunit *test)
174 struct ttm_buffer_object *bo;
177 bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
178 bo->type = ttm_bo_type_sg + 1;
180 dma_resv_lock(bo->base.resv, NULL);
181 err = ttm_tt_create(bo, false);
182 dma_resv_unlock(bo->base.resv);
184 KUNIT_EXPECT_EQ(test, err, -EINVAL);
185 KUNIT_EXPECT_NULL(test, bo->ttm);
188 static void ttm_tt_create_ttm_exists(struct kunit *test)
190 struct ttm_buffer_object *bo;
192 enum ttm_caching caching = ttm_cached;
195 tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
196 KUNIT_ASSERT_NOT_NULL(test, tt);
198 bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
200 err = ttm_tt_init(tt, bo, 0, caching, 0);
201 KUNIT_ASSERT_EQ(test, err, 0);
204 dma_resv_lock(bo->base.resv, NULL);
205 err = ttm_tt_create(bo, false);
206 dma_resv_unlock(bo->base.resv);
208 /* Expect to keep the previous TTM */
209 KUNIT_ASSERT_EQ(test, err, 0);
210 KUNIT_ASSERT_PTR_EQ(test, tt, bo->ttm);
213 static struct ttm_tt *ttm_tt_null_create(struct ttm_buffer_object *bo,
219 static struct ttm_device_funcs ttm_dev_empty_funcs = {
220 .ttm_tt_create = ttm_tt_null_create,
223 static void ttm_tt_create_failed(struct kunit *test)
225 const struct ttm_test_devices *devs = test->priv;
226 struct ttm_buffer_object *bo;
229 bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
231 /* Update ttm_device_funcs so we don't alloc ttm_tt */
232 devs->ttm_dev->funcs = &ttm_dev_empty_funcs;
234 dma_resv_lock(bo->base.resv, NULL);
235 err = ttm_tt_create(bo, false);
236 dma_resv_unlock(bo->base.resv);
238 KUNIT_ASSERT_EQ(test, err, -ENOMEM);
241 static void ttm_tt_destroy_basic(struct kunit *test)
243 const struct ttm_test_devices *devs = test->priv;
244 struct ttm_buffer_object *bo;
247 bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
249 dma_resv_lock(bo->base.resv, NULL);
250 err = ttm_tt_create(bo, false);
251 dma_resv_unlock(bo->base.resv);
253 KUNIT_ASSERT_EQ(test, err, 0);
254 KUNIT_ASSERT_NOT_NULL(test, bo->ttm);
256 ttm_tt_destroy(devs->ttm_dev, bo->ttm);
259 static void ttm_tt_populate_null_ttm(struct kunit *test)
261 const struct ttm_test_devices *devs = test->priv;
262 struct ttm_operation_ctx ctx = { };
265 err = ttm_tt_populate(devs->ttm_dev, NULL, &ctx);
266 KUNIT_ASSERT_EQ(test, err, -EINVAL);
269 static void ttm_tt_populate_populated_ttm(struct kunit *test)
271 const struct ttm_test_devices *devs = test->priv;
272 struct ttm_operation_ctx ctx = { };
273 struct ttm_buffer_object *bo;
275 struct page *populated_page;
278 bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
280 tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
281 KUNIT_ASSERT_NOT_NULL(test, tt);
283 err = ttm_tt_init(tt, bo, 0, ttm_cached, 0);
284 KUNIT_ASSERT_EQ(test, err, 0);
286 err = ttm_tt_populate(devs->ttm_dev, tt, &ctx);
287 KUNIT_ASSERT_EQ(test, err, 0);
288 populated_page = *tt->pages;
290 err = ttm_tt_populate(devs->ttm_dev, tt, &ctx);
291 KUNIT_ASSERT_PTR_EQ(test, populated_page, *tt->pages);
294 static void ttm_tt_unpopulate_basic(struct kunit *test)
296 const struct ttm_test_devices *devs = test->priv;
297 struct ttm_operation_ctx ctx = { };
298 struct ttm_buffer_object *bo;
302 bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
304 tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
305 KUNIT_ASSERT_NOT_NULL(test, tt);
307 err = ttm_tt_init(tt, bo, 0, ttm_cached, 0);
308 KUNIT_ASSERT_EQ(test, err, 0);
310 err = ttm_tt_populate(devs->ttm_dev, tt, &ctx);
311 KUNIT_ASSERT_EQ(test, err, 0);
312 KUNIT_ASSERT_TRUE(test, ttm_tt_is_populated(tt));
314 ttm_tt_unpopulate(devs->ttm_dev, tt);
315 KUNIT_ASSERT_FALSE(test, ttm_tt_is_populated(tt));
318 static void ttm_tt_unpopulate_empty_ttm(struct kunit *test)
320 const struct ttm_test_devices *devs = test->priv;
321 struct ttm_buffer_object *bo;
325 bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
327 tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
328 KUNIT_ASSERT_NOT_NULL(test, tt);
330 err = ttm_tt_init(tt, bo, 0, ttm_cached, 0);
331 KUNIT_ASSERT_EQ(test, err, 0);
333 ttm_tt_unpopulate(devs->ttm_dev, tt);
334 /* Expect graceful handling of unpopulated TTs */
337 static void ttm_tt_swapin_basic(struct kunit *test)
339 const struct ttm_test_devices *devs = test->priv;
340 int expected_num_pages = BO_SIZE >> PAGE_SHIFT;
341 struct ttm_operation_ctx ctx = { };
342 struct ttm_buffer_object *bo;
346 bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
348 tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
349 KUNIT_ASSERT_NOT_NULL(test, tt);
351 err = ttm_tt_init(tt, bo, 0, ttm_cached, 0);
352 KUNIT_ASSERT_EQ(test, err, 0);
354 err = ttm_tt_populate(devs->ttm_dev, tt, &ctx);
355 KUNIT_ASSERT_EQ(test, err, 0);
356 KUNIT_ASSERT_TRUE(test, ttm_tt_is_populated(tt));
358 num_pages = ttm_tt_swapout(devs->ttm_dev, tt, GFP_KERNEL);
359 KUNIT_ASSERT_EQ(test, num_pages, expected_num_pages);
360 KUNIT_ASSERT_NOT_NULL(test, tt->swap_storage);
361 KUNIT_ASSERT_TRUE(test, tt->page_flags & TTM_TT_FLAG_SWAPPED);
363 /* Swapout depopulates TT, allocate pages and then swap them in */
364 err = ttm_pool_alloc(&devs->ttm_dev->pool, tt, &ctx);
365 KUNIT_ASSERT_EQ(test, err, 0);
367 err = ttm_tt_swapin(tt);
368 KUNIT_ASSERT_EQ(test, err, 0);
369 KUNIT_ASSERT_NULL(test, tt->swap_storage);
370 KUNIT_ASSERT_FALSE(test, tt->page_flags & TTM_TT_FLAG_SWAPPED);
373 static struct kunit_case ttm_tt_test_cases[] = {
374 KUNIT_CASE_PARAM(ttm_tt_init_basic, ttm_tt_init_basic_gen_params),
375 KUNIT_CASE(ttm_tt_init_misaligned),
376 KUNIT_CASE(ttm_tt_fini_basic),
377 KUNIT_CASE(ttm_tt_fini_sg),
378 KUNIT_CASE(ttm_tt_fini_shmem),
379 KUNIT_CASE(ttm_tt_create_basic),
380 KUNIT_CASE(ttm_tt_create_invalid_bo_type),
381 KUNIT_CASE(ttm_tt_create_ttm_exists),
382 KUNIT_CASE(ttm_tt_create_failed),
383 KUNIT_CASE(ttm_tt_destroy_basic),
384 KUNIT_CASE(ttm_tt_populate_null_ttm),
385 KUNIT_CASE(ttm_tt_populate_populated_ttm),
386 KUNIT_CASE(ttm_tt_unpopulate_basic),
387 KUNIT_CASE(ttm_tt_unpopulate_empty_ttm),
388 KUNIT_CASE(ttm_tt_swapin_basic),
392 static struct kunit_suite ttm_tt_test_suite = {
394 .init = ttm_test_devices_all_init,
395 .exit = ttm_test_devices_fini,
396 .test_cases = ttm_tt_test_cases,
399 kunit_test_suites(&ttm_tt_test_suite);
401 MODULE_DESCRIPTION("KUnit tests for ttm_tt APIs");
402 MODULE_LICENSE("GPL and additional rights");