// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit test suite for GEM objects backed by shmem buffers
 *
 * Copyright (C) 2023 Red Hat, Inc.
 */

#include <linux/dma-buf.h>
#include <linux/iosys-map.h>
#include <linux/sizes.h>

#include <kunit/test.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_kunit_helpers.h>
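
/* Size of the test buffer and the byte pattern used to fill it */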
#define TEST_SIZE		SZ_1M
#define TEST_BYTE		0xae

/*
 * Wrappers to avoid an explicit type casting when passing action
 * functions to kunit_add_action().
 */
static void kfree_wrapper(void *ptr)
{
	const void *obj = ptr;

	kfree(obj);
}

static void sg_free_table_wrapper(void *ptr)
{
	struct sg_table *sgt = ptr;

	sg_free_table(sgt);
}

static void drm_gem_shmem_free_wrapper(void *ptr)
{
	struct drm_gem_shmem_object *shmem = ptr;

	drm_gem_shmem_free(shmem);
}

/*
 * Test creating a shmem GEM object backed by a shmem buffer. The test
 * case succeeds if the GEM object is successfully allocated with the
 * shmem file node and object functions attributes set, and the size
 * attribute is equal to the requested size.
 */
static void drm_gem_shmem_test_obj_create(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	KUNIT_EXPECT_EQ(test, shmem->base.size, TEST_SIZE);
	KUNIT_EXPECT_NOT_NULL(test, shmem->base.filp);
	KUNIT_EXPECT_NOT_NULL(test, shmem->base.funcs);

	drm_gem_shmem_free(shmem);
}

/*
 * Test creating a shmem GEM object from a scatter/gather table exported
 * via a DMA-BUF. The test case succeeds if the GEM object is successfully
 * created with the shmem file node attribute equal to NULL and the sgt
 * attribute pointing to the scatter/gather table that has been imported.
 */
static void drm_gem_shmem_test_obj_create_private(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *gem_obj;
	struct dma_buf buf_mock;
	struct dma_buf_attachment attach_mock;
	struct sg_table *sgt;
	char *buf;
	int ret;

	/* Create a mock scatter/gather table */
	buf = kunit_kzalloc(test, TEST_SIZE, GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, buf);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, sgt);

	ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
	KUNIT_ASSERT_EQ(test, ret, 0);

	sg_init_one(sgt->sgl, buf, TEST_SIZE);

	/* Init a mock DMA-BUF */
	buf_mock.size = TEST_SIZE;
	attach_mock.dmabuf = &buf_mock;
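
	/*
	 * Only the size field of the mock DMA-BUF is expected to be
	 * dereferenced by the import helper below, so partially
	 * initialized on-stack mocks are sufficient here.
	 */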
	gem_obj = drm_gem_shmem_prime_import_sg_table(drm_dev, &attach_mock, sgt);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gem_obj);
	KUNIT_EXPECT_EQ(test, gem_obj->size, TEST_SIZE);
	KUNIT_EXPECT_NULL(test, gem_obj->filp);
	KUNIT_EXPECT_NOT_NULL(test, gem_obj->funcs);

	/* The scatter/gather table will be freed by drm_gem_shmem_free() */
	kunit_remove_action(test, sg_free_table_wrapper, sgt);
	kunit_remove_action(test, kfree_wrapper, sgt);

	shmem = to_drm_gem_shmem_obj(gem_obj);
	KUNIT_EXPECT_PTR_EQ(test, shmem->sgt, sgt);

	drm_gem_shmem_free(shmem);
}

/*
 * Test pinning backing pages for a shmem GEM object. The test case
 * succeeds if a suitable number of backing pages are allocated, and
 * the pages table counter attribute is increased by one.
 */
static void drm_gem_shmem_test_pin_pages(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	int i, ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	KUNIT_EXPECT_NULL(test, shmem->pages);
	KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_gem_shmem_pin(shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);
	KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
	KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
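
	/* A page must have been allocated for every PAGE_SIZE chunk of the buffer */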
	for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++)
		KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]);

	drm_gem_shmem_unpin(shmem);
	KUNIT_EXPECT_NULL(test, shmem->pages);
	KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
}

/*
 * Test creating a virtual mapping for a shmem GEM object. The test
 * case succeeds if the backing memory is mapped and the reference
 * counter for virtual mapping is increased by one. Moreover, the test
 * case writes and then reads a test pattern over the mapped memory.
 */
static void drm_gem_shmem_test_vmap(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct iosys_map map;
	int ret, i;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	KUNIT_EXPECT_NULL(test, shmem->vaddr);
	KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_gem_shmem_vmap(shmem, &map);
	KUNIT_ASSERT_EQ(test, ret, 0);
	KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr);
	KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map));
	KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1);
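
	/* Fill the mapping with a known pattern and read it back byte by byte */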
	iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE);
	for (i = 0; i < TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE);

	drm_gem_shmem_vunmap(shmem, &map);
	KUNIT_EXPECT_NULL(test, shmem->vaddr);
	KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
}

/*
 * Test exporting a scatter/gather table of pinned pages suitable for
 * PRIME usage from a shmem GEM object. The test case succeeds if a
 * scatter/gather table large enough to accommodate the backing memory
 * is successfully exported.
 */
static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int si, len = 0;
	int ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_gem_shmem_pin(shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);
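
	/*
	 * Unlike drm_gem_shmem_get_pages_sgt(), the helper below is expected
	 * to return a new scatter/gather table without caching it in the
	 * shmem->sgt attribute, as verified right after.
	 */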
	sgt = drm_gem_shmem_get_sg_table(shmem);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
	KUNIT_EXPECT_NULL(test, shmem->sgt);

	ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
	KUNIT_ASSERT_EQ(test, ret, 0);
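
	/* Sum the entry lengths: the table must cover the whole backing buffer */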
	for_each_sgtable_sg(sgt, sg, si) {
		KUNIT_EXPECT_NOT_NULL(test, sg);
		len += sg->length;
	}

	KUNIT_EXPECT_GE(test, len, TEST_SIZE);
}

/*
 * Test pinning pages and exporting a scatter/gather table suitable for
 * driver usage from a shmem GEM object. The test case succeeds if the
 * backing pages are pinned and a scatter/gather table large enough to
 * accommodate the backing memory is successfully exported.
 */
static void drm_gem_shmem_test_get_sg_table(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int si, len = 0;
	int ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* The scatter/gather table will be freed by drm_gem_shmem_free() */
	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
	KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
	KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
	KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt);
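
	/* As above, the summed entry lengths must cover the whole buffer */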
	for_each_sgtable_sg(sgt, sg, si) {
		KUNIT_EXPECT_NOT_NULL(test, sg);
		len += sg->length;
	}

	KUNIT_EXPECT_GE(test, len, TEST_SIZE);
}

/*
 * Test updating the madvise state of a shmem GEM object. The test
 * case checks that the function for setting madv updates it only if
 * its current value is greater than or equal to zero, and returns
 * false if it has a negative value.
 */
static void drm_gem_shmem_test_madvise(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	KUNIT_ASSERT_EQ(test, shmem->madv, 0);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_gem_shmem_madvise(shmem, 1);
	KUNIT_EXPECT_TRUE(test, ret);
	KUNIT_ASSERT_EQ(test, shmem->madv, 1);

	/* Set madv to a negative value */
	ret = drm_gem_shmem_madvise(shmem, -1);
	KUNIT_EXPECT_FALSE(test, ret);
	KUNIT_ASSERT_EQ(test, shmem->madv, -1);

	/* Check that madv cannot be set back to a non-negative value */
	ret = drm_gem_shmem_madvise(shmem, 0);
	KUNIT_EXPECT_FALSE(test, ret);
	KUNIT_ASSERT_EQ(test, shmem->madv, -1);
}

/*
 * Test purging a shmem GEM object. First, assert that a newly created
 * shmem GEM object is not purgeable. Then, set madvise to a positive
 * value and call drm_gem_shmem_get_pages_sgt() to pin and dma-map the
 * backing pages. Finally, assert that the shmem GEM object is now
 * purgeable and purge it.
 */
static void drm_gem_shmem_test_purge(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct sg_table *sgt;
	int ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_gem_shmem_is_purgeable(shmem);
	KUNIT_EXPECT_FALSE(test, ret);

	ret = drm_gem_shmem_madvise(shmem, 1);
	KUNIT_EXPECT_TRUE(test, ret);

	/* The scatter/gather table will be freed by drm_gem_shmem_free() */
	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);

	ret = drm_gem_shmem_is_purgeable(shmem);
	KUNIT_EXPECT_TRUE(test, ret);
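
	/*
	 * Purging should release both the backing pages and the
	 * scatter/gather table, and mark the object as purged (madv < 0).
	 */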
	drm_gem_shmem_purge(shmem);
	KUNIT_EXPECT_NULL(test, shmem->pages);
	KUNIT_EXPECT_NULL(test, shmem->sgt);
	KUNIT_EXPECT_EQ(test, shmem->madv, -1);
}

static int drm_gem_shmem_test_init(struct kunit *test)
{
	struct device *dev;
	struct drm_device *drm_dev;

	/* Allocate a parent device */
	dev = drm_kunit_helper_alloc_device(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

	/*
	 * The DRM core will automatically initialize the GEM core and create
	 * a DRM Memory Manager object which provides an address space pool
	 * for GEM objects allocation.
	 */
	drm_dev = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm_dev),
						      0, DRIVER_GEM);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm_dev);

	test->priv = drm_dev;

	return 0;
}

static struct kunit_case drm_gem_shmem_test_cases[] = {
	KUNIT_CASE(drm_gem_shmem_test_obj_create),
	KUNIT_CASE(drm_gem_shmem_test_obj_create_private),
	KUNIT_CASE(drm_gem_shmem_test_pin_pages),
	KUNIT_CASE(drm_gem_shmem_test_vmap),
	KUNIT_CASE(drm_gem_shmem_test_get_pages_sgt),
	KUNIT_CASE(drm_gem_shmem_test_get_sg_table),
	KUNIT_CASE(drm_gem_shmem_test_madvise),
	KUNIT_CASE(drm_gem_shmem_test_purge),
	{}
};

static struct kunit_suite drm_gem_shmem_suite = {
	.name = "drm_gem_shmem",
	.init = drm_gem_shmem_test_init,
	.test_cases = drm_gem_shmem_test_cases,
};

kunit_test_suite(drm_gem_shmem_suite);

MODULE_LICENSE("GPL");