// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
 * Copyright © 2023 Intel Corporation
 */
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>

#include "ttm_kunit_helpers.h"

struct ttm_device_test_case {
	const char *description;
	bool use_dma_alloc;
	bool use_dma32;
	bool pools_init_expected;
};

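/*
 * Initialize a single struct ttm_device and check that the driver funcs,
 * workqueue, system resource manager and address space mapping are set up.
 */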
static void ttm_device_init_basic(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_device *ttm_dev;
	struct ttm_resource_manager *ttm_sys_man;
	int err;

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);

	KUNIT_EXPECT_PTR_EQ(test, ttm_dev->funcs, &ttm_dev_funcs);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev->wq);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]);

	ttm_sys_man = &ttm_dev->sysman;
	KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man);
	KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_tt);
	KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_type);
	KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man->func);

	KUNIT_EXPECT_PTR_EQ(test, ttm_dev->dev_mapping,
			    priv->drm->anon_inode->i_mapping);

	ttm_device_fini(ttm_dev);
}

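/*
 * Initialize several devices backed by the same DRM device and make sure
 * they all end up on the shared device list.
 */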
static void ttm_device_init_multiple(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_device *ttm_devs;
	unsigned int i, num_dev = 3;
	int err;

	ttm_devs = kunit_kcalloc(test, num_dev, sizeof(*ttm_devs), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_devs);

	for (i = 0; i < num_dev; i++) {
		err = ttm_device_kunit_init(priv, &ttm_devs[i], false, false);
		KUNIT_ASSERT_EQ(test, err, 0);

		KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].dev_mapping,
				    priv->drm->anon_inode->i_mapping);
		KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].wq);
		KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].funcs, &ttm_dev_funcs);
		KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].man_drv[TTM_PL_SYSTEM]);
	}

	KUNIT_ASSERT_EQ(test, list_count_nodes(&ttm_devs[0].device_list), num_dev);

	for (i = 0; i < num_dev; i++)
		ttm_device_fini(&ttm_devs[i]);
}

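/*
 * Tear down a device and check that its system manager is disabled and
 * unregistered afterwards.
 */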
static void ttm_device_fini_basic(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_device *ttm_dev;
	struct ttm_resource_manager *man;
	int err;

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);

	man = ttm_manager_type(ttm_dev, TTM_PL_SYSTEM);
	KUNIT_ASSERT_NOT_NULL(test, man);

	ttm_device_fini(ttm_dev);

	KUNIT_ASSERT_FALSE(test, man->use_type);
	KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[0]));
	KUNIT_ASSERT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]);
}

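/*
 * Device initialization is expected to fail with -EINVAL when the DRM
 * device has no VMA offset manager.
 */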
static void ttm_device_init_no_vma_man(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct drm_device *drm = priv->drm;
	struct ttm_device *ttm_dev;
	struct drm_vma_offset_manager *vma_man;
	int err;

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	/* Let's pretend there's no VMA manager allocated */
	vma_man = drm->vma_offset_manager;
	drm->vma_offset_manager = NULL;

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_EXPECT_EQ(test, err, -EINVAL);

	/* Bring the manager back for a graceful cleanup */
	drm->vma_offset_manager = vma_man;
}

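/* Parameters for the ttm_device_init_pools() test below. */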
static const struct ttm_device_test_case ttm_device_cases[] = {
	{
		.description = "No DMA allocations, no DMA32 required",
		.use_dma_alloc = false,
		.use_dma32 = false,
		.pools_init_expected = false,
	},
	{
		.description = "DMA allocations, DMA32 required",
		.use_dma_alloc = true,
		.use_dma32 = true,
		.pools_init_expected = true,
	},
	{
		.description = "No DMA allocations, DMA32 required",
		.use_dma_alloc = false,
		.use_dma32 = true,
		.pools_init_expected = false,
	},
	{
		.description = "DMA allocations, no DMA32 required",
		.use_dma_alloc = true,
		.use_dma32 = false,
		.pools_init_expected = true,
	},
};

static void ttm_device_case_desc(const struct ttm_device_test_case *t, char *desc)
{
	strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
}

KUNIT_ARRAY_PARAM(ttm_device, ttm_device_cases, ttm_device_case_desc);

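/*
 * Check that the device page pool is (or is not) set up according to the
 * use_dma_alloc and use_dma32 flags passed at initialization time.
 */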
static void ttm_device_init_pools(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	const struct ttm_device_test_case *params = test->param_value;
	struct ttm_device *ttm_dev;
	struct ttm_pool *pool;
	struct ttm_pool_type pt;
	int err;

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(priv, ttm_dev,
				    params->use_dma_alloc,
				    params->use_dma32);
	KUNIT_ASSERT_EQ(test, err, 0);

	pool = &ttm_dev->pool;
	KUNIT_ASSERT_NOT_NULL(test, pool);
	KUNIT_EXPECT_PTR_EQ(test, pool->dev, priv->dev);
	KUNIT_EXPECT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);
	KUNIT_EXPECT_EQ(test, pool->use_dma32, params->use_dma32);

	if (params->pools_init_expected) {
		for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
			for (int j = 0; j < NR_PAGE_ORDERS; ++j) {
				pt = pool->caching[i].orders[j];
				KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
				KUNIT_EXPECT_EQ(test, pt.caching, i);
				KUNIT_EXPECT_EQ(test, pt.order, j);

				if (params->use_dma_alloc)
					KUNIT_ASSERT_FALSE(test,
							   list_empty(&pt.pages));
			}
		}
	}

	ttm_device_fini(ttm_dev);
}

static struct kunit_case ttm_device_test_cases[] = {
	KUNIT_CASE(ttm_device_init_basic),
	KUNIT_CASE(ttm_device_init_multiple),
	KUNIT_CASE(ttm_device_fini_basic),
	KUNIT_CASE(ttm_device_init_no_vma_man),
	KUNIT_CASE_PARAM(ttm_device_init_pools, ttm_device_gen_params),
	{}
};

static struct kunit_suite ttm_device_test_suite = {
	.name = "ttm_device",
	.init = ttm_test_devices_init,
	.exit = ttm_test_devices_fini,
	.test_cases = ttm_device_test_cases,
};

kunit_test_suites(&ttm_device_test_suite);

MODULE_DESCRIPTION("KUnit tests for ttm_device APIs");
MODULE_LICENSE("GPL and additional rights");