// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
 * Copyright © 2023 Intel Corporation
 */
7 #include <drm/ttm/ttm_tt.h>
8 #include <drm/ttm/ttm_pool.h>
10 #include "ttm_kunit_helpers.h"
12 struct ttm_pool_test_case {
13 const char *description;
/* Per-test state allocated in the suite's init hook. */
struct ttm_pool_test_priv {
	struct ttm_test_devices *devs;

	/* Used to create mock ttm_tts */
	struct ttm_buffer_object *mock_bo;
};
25 static struct ttm_operation_ctx simple_ctx = {
26 .interruptible = true,
30 static int ttm_pool_test_init(struct kunit *test)
32 struct ttm_pool_test_priv *priv;
34 priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
35 KUNIT_ASSERT_NOT_NULL(test, priv);
37 priv->devs = ttm_test_devices_basic(test);
43 static void ttm_pool_test_fini(struct kunit *test)
45 struct ttm_pool_test_priv *priv = test->priv;
47 ttm_test_devices_put(test, priv->devs);
50 static struct ttm_tt *ttm_tt_kunit_init(struct kunit *test,
52 enum ttm_caching caching,
55 struct ttm_pool_test_priv *priv = test->priv;
56 struct ttm_buffer_object *bo;
60 bo = ttm_bo_kunit_init(test, priv->devs, size);
61 KUNIT_ASSERT_NOT_NULL(test, bo);
64 tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
65 KUNIT_ASSERT_NOT_NULL(test, tt);
67 err = ttm_tt_init(tt, priv->mock_bo, page_flags, caching, 0);
68 KUNIT_ASSERT_EQ(test, err, 0);
73 static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
75 enum ttm_caching caching)
77 struct ttm_pool_test_priv *priv = test->priv;
78 struct ttm_test_devices *devs = priv->devs;
79 struct ttm_pool *pool;
81 unsigned long order = __fls(size / PAGE_SIZE);
84 tt = ttm_tt_kunit_init(test, order, caching, size);
85 KUNIT_ASSERT_NOT_NULL(test, tt);
87 pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
88 KUNIT_ASSERT_NOT_NULL(test, pool);
90 ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
92 err = ttm_pool_alloc(pool, tt, &simple_ctx);
93 KUNIT_ASSERT_EQ(test, err, 0);
95 ttm_pool_free(pool, tt);
101 static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
103 .description = "One page",
107 .description = "More than one page",
111 .description = "Above the allocation limit",
112 .order = MAX_PAGE_ORDER + 1,
115 .description = "One page, with coherent DMA mappings enabled",
117 .use_dma_alloc = true,
120 .description = "Above the allocation limit, with coherent DMA mappings enabled",
121 .order = MAX_PAGE_ORDER + 1,
122 .use_dma_alloc = true,
126 static void ttm_pool_alloc_case_desc(const struct ttm_pool_test_case *t,
129 strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
/* Generates ttm_pool_alloc_basic_gen_params() used by KUNIT_CASE_PARAM. */
KUNIT_ARRAY_PARAM(ttm_pool_alloc_basic, ttm_pool_basic_cases,
		  ttm_pool_alloc_case_desc);
135 static void ttm_pool_alloc_basic(struct kunit *test)
137 struct ttm_pool_test_priv *priv = test->priv;
138 struct ttm_test_devices *devs = priv->devs;
139 const struct ttm_pool_test_case *params = test->param_value;
141 struct ttm_pool *pool;
142 struct page *fst_page, *last_page;
143 enum ttm_caching caching = ttm_uncached;
144 unsigned int expected_num_pages = 1 << params->order;
145 size_t size = expected_num_pages * PAGE_SIZE;
148 tt = ttm_tt_kunit_init(test, 0, caching, size);
149 KUNIT_ASSERT_NOT_NULL(test, tt);
151 pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
152 KUNIT_ASSERT_NOT_NULL(test, pool);
154 ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->use_dma_alloc,
157 KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev);
158 KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE);
159 KUNIT_ASSERT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);
161 err = ttm_pool_alloc(pool, tt, &simple_ctx);
162 KUNIT_ASSERT_EQ(test, err, 0);
163 KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
165 fst_page = tt->pages[0];
166 last_page = tt->pages[tt->num_pages - 1];
168 if (params->order <= MAX_PAGE_ORDER) {
169 if (params->use_dma_alloc) {
170 KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
171 KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
173 KUNIT_ASSERT_EQ(test, fst_page->private, params->order);
176 if (params->use_dma_alloc) {
177 KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
178 KUNIT_ASSERT_NULL(test, (void *)last_page->private);
181 * We expect to alloc one big block, followed by
184 KUNIT_ASSERT_EQ(test, fst_page->private,
185 min_t(unsigned int, MAX_PAGE_ORDER,
187 KUNIT_ASSERT_EQ(test, last_page->private, 0);
191 ttm_pool_free(pool, tt);
196 static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
198 struct ttm_pool_test_priv *priv = test->priv;
199 struct ttm_test_devices *devs = priv->devs;
200 const struct ttm_pool_test_case *params = test->param_value;
202 struct ttm_pool *pool;
203 struct ttm_buffer_object *bo;
204 dma_addr_t dma1, dma2;
205 enum ttm_caching caching = ttm_uncached;
206 unsigned int expected_num_pages = 1 << params->order;
207 size_t size = expected_num_pages * PAGE_SIZE;
210 tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
211 KUNIT_ASSERT_NOT_NULL(test, tt);
213 bo = ttm_bo_kunit_init(test, devs, size);
214 KUNIT_ASSERT_NOT_NULL(test, bo);
216 err = ttm_sg_tt_init(tt, bo, 0, caching);
217 KUNIT_ASSERT_EQ(test, err, 0);
219 pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
220 KUNIT_ASSERT_NOT_NULL(test, pool);
222 ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
224 err = ttm_pool_alloc(pool, tt, &simple_ctx);
225 KUNIT_ASSERT_EQ(test, err, 0);
226 KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
228 dma1 = tt->dma_address[0];
229 dma2 = tt->dma_address[tt->num_pages - 1];
231 KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma1);
232 KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma2);
234 ttm_pool_free(pool, tt);
239 static void ttm_pool_alloc_order_caching_match(struct kunit *test)
242 struct ttm_pool *pool;
243 struct ttm_pool_type *pt;
244 enum ttm_caching caching = ttm_uncached;
245 unsigned int order = 0;
246 size_t size = PAGE_SIZE;
249 pool = ttm_pool_pre_populated(test, size, caching);
251 pt = &pool->caching[caching].orders[order];
252 KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
254 tt = ttm_tt_kunit_init(test, 0, caching, size);
255 KUNIT_ASSERT_NOT_NULL(test, tt);
257 err = ttm_pool_alloc(pool, tt, &simple_ctx);
258 KUNIT_ASSERT_EQ(test, err, 0);
260 KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
262 ttm_pool_free(pool, tt);
267 static void ttm_pool_alloc_caching_mismatch(struct kunit *test)
270 struct ttm_pool *pool;
271 struct ttm_pool_type *pt_pool, *pt_tt;
272 enum ttm_caching tt_caching = ttm_uncached;
273 enum ttm_caching pool_caching = ttm_cached;
274 size_t size = PAGE_SIZE;
275 unsigned int order = 0;
278 pool = ttm_pool_pre_populated(test, size, pool_caching);
280 pt_pool = &pool->caching[pool_caching].orders[order];
281 pt_tt = &pool->caching[tt_caching].orders[order];
283 tt = ttm_tt_kunit_init(test, 0, tt_caching, size);
284 KUNIT_ASSERT_NOT_NULL(test, tt);
286 KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
287 KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
289 err = ttm_pool_alloc(pool, tt, &simple_ctx);
290 KUNIT_ASSERT_EQ(test, err, 0);
292 ttm_pool_free(pool, tt);
295 KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
296 KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
301 static void ttm_pool_alloc_order_mismatch(struct kunit *test)
304 struct ttm_pool *pool;
305 struct ttm_pool_type *pt_pool, *pt_tt;
306 enum ttm_caching caching = ttm_uncached;
307 unsigned int order = 2;
308 size_t fst_size = (1 << order) * PAGE_SIZE;
309 size_t snd_size = PAGE_SIZE;
312 pool = ttm_pool_pre_populated(test, fst_size, caching);
314 pt_pool = &pool->caching[caching].orders[order];
315 pt_tt = &pool->caching[caching].orders[0];
317 tt = ttm_tt_kunit_init(test, 0, caching, snd_size);
318 KUNIT_ASSERT_NOT_NULL(test, tt);
320 KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
321 KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
323 err = ttm_pool_alloc(pool, tt, &simple_ctx);
324 KUNIT_ASSERT_EQ(test, err, 0);
326 ttm_pool_free(pool, tt);
329 KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
330 KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
335 static void ttm_pool_free_dma_alloc(struct kunit *test)
337 struct ttm_pool_test_priv *priv = test->priv;
338 struct ttm_test_devices *devs = priv->devs;
340 struct ttm_pool *pool;
341 struct ttm_pool_type *pt;
342 enum ttm_caching caching = ttm_uncached;
343 unsigned int order = 2;
344 size_t size = (1 << order) * PAGE_SIZE;
346 tt = ttm_tt_kunit_init(test, 0, caching, size);
347 KUNIT_ASSERT_NOT_NULL(test, tt);
349 pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
350 KUNIT_ASSERT_NOT_NULL(test, pool);
352 ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
353 ttm_pool_alloc(pool, tt, &simple_ctx);
355 pt = &pool->caching[caching].orders[order];
356 KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
358 ttm_pool_free(pool, tt);
361 KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
366 static void ttm_pool_free_no_dma_alloc(struct kunit *test)
368 struct ttm_pool_test_priv *priv = test->priv;
369 struct ttm_test_devices *devs = priv->devs;
371 struct ttm_pool *pool;
372 struct ttm_pool_type *pt;
373 enum ttm_caching caching = ttm_uncached;
374 unsigned int order = 2;
375 size_t size = (1 << order) * PAGE_SIZE;
377 tt = ttm_tt_kunit_init(test, 0, caching, size);
378 KUNIT_ASSERT_NOT_NULL(test, tt);
380 pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
381 KUNIT_ASSERT_NOT_NULL(test, pool);
383 ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, false, false);
384 ttm_pool_alloc(pool, tt, &simple_ctx);
386 pt = &pool->caching[caching].orders[order];
387 KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
389 ttm_pool_free(pool, tt);
392 KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
397 static void ttm_pool_fini_basic(struct kunit *test)
399 struct ttm_pool *pool;
400 struct ttm_pool_type *pt;
401 enum ttm_caching caching = ttm_uncached;
402 unsigned int order = 0;
403 size_t size = PAGE_SIZE;
405 pool = ttm_pool_pre_populated(test, size, caching);
406 pt = &pool->caching[caching].orders[order];
408 KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
412 KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
415 static struct kunit_case ttm_pool_test_cases[] = {
416 KUNIT_CASE_PARAM(ttm_pool_alloc_basic, ttm_pool_alloc_basic_gen_params),
417 KUNIT_CASE_PARAM(ttm_pool_alloc_basic_dma_addr,
418 ttm_pool_alloc_basic_gen_params),
419 KUNIT_CASE(ttm_pool_alloc_order_caching_match),
420 KUNIT_CASE(ttm_pool_alloc_caching_mismatch),
421 KUNIT_CASE(ttm_pool_alloc_order_mismatch),
422 KUNIT_CASE(ttm_pool_free_dma_alloc),
423 KUNIT_CASE(ttm_pool_free_no_dma_alloc),
424 KUNIT_CASE(ttm_pool_fini_basic),
428 static struct kunit_suite ttm_pool_test_suite = {
430 .init = ttm_pool_test_init,
431 .exit = ttm_pool_test_fini,
432 .test_cases = ttm_pool_test_cases,
/* Register the suite with the KUnit framework. */
kunit_test_suites(&ttm_pool_test_suite);

MODULE_LICENSE("GPL");