// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit test suite for GEM objects backed by shmem buffers
 *
 * Copyright (C) 2023 Red Hat, Inc.
 */

#include <linux/dma-buf.h>
#include <linux/iosys-map.h>
#include <linux/sizes.h>

#include <kunit/test.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_kunit_helpers.h>

#define TEST_SIZE	SZ_1M
#define TEST_BYTE	0xae

/*
 * Wrappers to avoid cast warnings when passing action functions
 * directly to kunit_add_action().
 */
KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);

KUNIT_DEFINE_ACTION_WRAPPER(sg_free_table_wrapper, sg_free_table,
                            struct sg_table *);

KUNIT_DEFINE_ACTION_WRAPPER(drm_gem_shmem_free_wrapper, drm_gem_shmem_free,
                            struct drm_gem_shmem_object *);

/*
 * Test creating a GEM object backed by a shmem buffer. The test case
 * succeeds if the GEM object is successfully allocated with the shmem
 * file node and object functions attributes set, and the size
 * attribute is equal to the correct size.
 */
static void drm_gem_shmem_test_obj_create(struct kunit *test)
{
        struct drm_device *drm_dev = test->priv;
        struct drm_gem_shmem_object *shmem;

        shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
        KUNIT_EXPECT_EQ(test, shmem->base.size, TEST_SIZE);
        KUNIT_EXPECT_NOT_NULL(test, shmem->base.filp);
        KUNIT_EXPECT_NOT_NULL(test, shmem->base.funcs);

        drm_gem_shmem_free(shmem);
}

/*
 * Test creating a shmem GEM object from a scatter/gather table exported
 * via a DMA-BUF. The test case succeeds if the GEM object is successfully
 * created with the shmem file node attribute equal to NULL and the sgt
 * attribute pointing to the scatter/gather table that has been imported.
 */
static void drm_gem_shmem_test_obj_create_private(struct kunit *test)
{
        struct drm_device *drm_dev = test->priv;
        struct drm_gem_shmem_object *shmem;
        struct drm_gem_object *gem_obj;
        struct dma_buf buf_mock;
        struct dma_buf_attachment attach_mock;
        struct sg_table *sgt;
        char *buf;
        int ret;

        /* Create a mock scatter/gather table */
        buf = kunit_kzalloc(test, TEST_SIZE, GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, buf);

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, sgt);

        ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
        KUNIT_ASSERT_EQ(test, ret, 0);

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        KUNIT_ASSERT_EQ(test, ret, 0);

        ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
        KUNIT_ASSERT_EQ(test, ret, 0);

        sg_init_one(sgt->sgl, buf, TEST_SIZE);

        /*
         * Set the DMA mask to 64 bits and map the sgtable, otherwise
         * drm_gem_shmem_free() will cause a warning on debug kernels.
         */
        ret = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(64));
        KUNIT_ASSERT_EQ(test, ret, 0);

        ret = dma_map_sgtable(drm_dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
        KUNIT_ASSERT_EQ(test, ret, 0);

        /* Init a mock DMA-BUF */
        buf_mock.size = TEST_SIZE;
        attach_mock.dmabuf = &buf_mock;

        gem_obj = drm_gem_shmem_prime_import_sg_table(drm_dev, &attach_mock, sgt);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gem_obj);
        KUNIT_EXPECT_EQ(test, gem_obj->size, TEST_SIZE);
        KUNIT_EXPECT_NULL(test, gem_obj->filp);
        KUNIT_EXPECT_NOT_NULL(test, gem_obj->funcs);

        /* The scatter/gather table will be freed by drm_gem_shmem_free() */
        kunit_remove_action(test, sg_free_table_wrapper, sgt);
        kunit_remove_action(test, kfree_wrapper, sgt);

        shmem = to_drm_gem_shmem_obj(gem_obj);
        KUNIT_EXPECT_PTR_EQ(test, shmem->sgt, sgt);

        drm_gem_shmem_free(shmem);
}

/*
 * Test pinning backing pages for a shmem GEM object. The test case
 * succeeds if a suitable number of backing pages are allocated, and
 * the pages table counter attribute is increased by one.
 */
static void drm_gem_shmem_test_pin_pages(struct kunit *test)
{
        struct drm_device *drm_dev = test->priv;
        struct drm_gem_shmem_object *shmem;
        int i, ret;

        shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
        KUNIT_EXPECT_NULL(test, shmem->pages);
        KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);

        ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
        KUNIT_ASSERT_EQ(test, ret, 0);

        ret = drm_gem_shmem_pin(shmem);
        KUNIT_ASSERT_EQ(test, ret, 0);
        KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
        KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
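
        /* Every page-sized chunk of the buffer should now have a backing page */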
        for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++)
                KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]);

        drm_gem_shmem_unpin(shmem);
        KUNIT_EXPECT_NULL(test, shmem->pages);
        KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
}

/*
 * Test creating a virtual mapping for a shmem GEM object. The test
 * case succeeds if the backing memory is mapped and the reference
 * counter for virtual mapping is increased by one. Moreover, the test
 * case writes and then reads a test pattern over the mapped memory.
 */
static void drm_gem_shmem_test_vmap(struct kunit *test)
{
        struct drm_device *drm_dev = test->priv;
        struct drm_gem_shmem_object *shmem;
        struct iosys_map map;
        int ret, i;

        shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
        KUNIT_EXPECT_NULL(test, shmem->vaddr);
        KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);

        ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
        KUNIT_ASSERT_EQ(test, ret, 0);

        ret = drm_gem_shmem_vmap(shmem, &map);
        KUNIT_ASSERT_EQ(test, ret, 0);
        KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr);
        KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map));
        KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1);
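
        /* Write a test pattern through the mapping, then read it back byte by byte */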
        iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE);
        for (i = 0; i < TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE);

        drm_gem_shmem_vunmap(shmem, &map);
        KUNIT_EXPECT_NULL(test, shmem->vaddr);
        KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
}

/*
 * Test exporting a scatter/gather table of pinned pages suitable for
 * PRIME usage from a shmem GEM object. The test case succeeds if a
 * scatter/gather table large enough to accommodate the backing memory
 * is successfully exported.
 */
static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test)
{
        struct drm_device *drm_dev = test->priv;
        struct drm_gem_shmem_object *shmem;
        struct sg_table *sgt;
        struct scatterlist *sg;
        unsigned int si, len = 0;
        int ret;

        shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);

        ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
        KUNIT_ASSERT_EQ(test, ret, 0);

        ret = drm_gem_shmem_pin(shmem);
        KUNIT_ASSERT_EQ(test, ret, 0);

        sgt = drm_gem_shmem_get_sg_table(shmem);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
        KUNIT_EXPECT_NULL(test, shmem->sgt);

        ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
        KUNIT_ASSERT_EQ(test, ret, 0);
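
        /* Sum the segment lengths; together they must cover the whole buffer */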
        for_each_sgtable_sg(sgt, sg, si) {
                KUNIT_EXPECT_NOT_NULL(test, sg);
                len += sg->length;
        }

        KUNIT_EXPECT_GE(test, len, TEST_SIZE);
}

/*
 * Test pinning pages and exporting a scatter/gather table suitable for
 * driver usage from a shmem GEM object. The test case succeeds if the
 * backing pages are pinned and a scatter/gather table large enough to
 * accommodate the backing memory is successfully exported.
 */
static void drm_gem_shmem_test_get_sg_table(struct kunit *test)
{
        struct drm_device *drm_dev = test->priv;
        struct drm_gem_shmem_object *shmem;
        struct sg_table *sgt;
        struct scatterlist *sg;
        unsigned int si, ret, len = 0;

        shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);

        ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
        KUNIT_ASSERT_EQ(test, ret, 0);

        /* The scatter/gather table will be freed by drm_gem_shmem_free() */
        sgt = drm_gem_shmem_get_pages_sgt(shmem);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
        KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
        KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
        KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt);
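
        /* As above, the table segments must cover the whole backing buffer */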
        for_each_sgtable_sg(sgt, sg, si) {
                KUNIT_EXPECT_NOT_NULL(test, sg);
                len += sg->length;
        }

        KUNIT_EXPECT_GE(test, len, TEST_SIZE);
}

/*
 * Test updating the madvise state of a shmem GEM object. The test
 * case checks that the function for setting madv updates it only if
 * its current value is greater than or equal to zero and returns
 * false if it has a negative value.
 */
static void drm_gem_shmem_test_madvise(struct kunit *test)
{
        struct drm_device *drm_dev = test->priv;
        struct drm_gem_shmem_object *shmem;
        int ret;

        shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
        KUNIT_ASSERT_EQ(test, shmem->madv, 0);

        ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
        KUNIT_ASSERT_EQ(test, ret, 0);

        ret = drm_gem_shmem_madvise(shmem, 1);
        KUNIT_EXPECT_TRUE(test, ret);
        KUNIT_ASSERT_EQ(test, shmem->madv, 1);

        /* Set madv to a negative value */
        ret = drm_gem_shmem_madvise(shmem, -1);
        KUNIT_EXPECT_FALSE(test, ret);
        KUNIT_ASSERT_EQ(test, shmem->madv, -1);

        /* Check that madv cannot be set back to a positive value */
        ret = drm_gem_shmem_madvise(shmem, 0);
        KUNIT_EXPECT_FALSE(test, ret);
        KUNIT_ASSERT_EQ(test, shmem->madv, -1);
}

/*
 * Test purging a shmem GEM object. First, assert that a newly created
 * shmem GEM object is not purgeable. Then, set madvise to a positive
 * value and call drm_gem_shmem_get_pages_sgt() to pin and dma-map the
 * backing pages. Finally, assert that the shmem GEM object is now
 * purgeable and purge it.
 */
static void drm_gem_shmem_test_purge(struct kunit *test)
{
        struct drm_device *drm_dev = test->priv;
        struct drm_gem_shmem_object *shmem;
        struct sg_table *sgt;
        int ret;

        shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);

        ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
        KUNIT_ASSERT_EQ(test, ret, 0);

        ret = drm_gem_shmem_is_purgeable(shmem);
        KUNIT_EXPECT_FALSE(test, ret);

        ret = drm_gem_shmem_madvise(shmem, 1);
        KUNIT_EXPECT_TRUE(test, ret);

        /* The scatter/gather table will be freed by drm_gem_shmem_free() */
        sgt = drm_gem_shmem_get_pages_sgt(shmem);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);

        ret = drm_gem_shmem_is_purgeable(shmem);
        KUNIT_EXPECT_TRUE(test, ret);

        drm_gem_shmem_purge(shmem);
        KUNIT_EXPECT_NULL(test, shmem->pages);
        KUNIT_EXPECT_NULL(test, shmem->sgt);
        KUNIT_EXPECT_EQ(test, shmem->madv, -1);
}

static int drm_gem_shmem_test_init(struct kunit *test)
{
        struct device *dev;
        struct drm_device *drm_dev;

        /* Allocate a parent device */
        dev = drm_kunit_helper_alloc_device(test);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

        /*
         * The DRM core will automatically initialize the GEM core and create
         * a DRM Memory Manager object which provides an address space pool
         * for GEM objects allocation.
         */
        drm_dev = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm_dev),
                                                      0, DRIVER_GEM);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm_dev);

        test->priv = drm_dev;

        return 0;
}

static struct kunit_case drm_gem_shmem_test_cases[] = {
        KUNIT_CASE(drm_gem_shmem_test_obj_create),
        KUNIT_CASE(drm_gem_shmem_test_obj_create_private),
        KUNIT_CASE(drm_gem_shmem_test_pin_pages),
        KUNIT_CASE(drm_gem_shmem_test_vmap),
        KUNIT_CASE(drm_gem_shmem_test_get_pages_sgt),
        KUNIT_CASE(drm_gem_shmem_test_get_sg_table),
        KUNIT_CASE(drm_gem_shmem_test_madvise),
        KUNIT_CASE(drm_gem_shmem_test_purge),
        {}
};

static struct kunit_suite drm_gem_shmem_suite = {
        .name = "drm_gem_shmem",
        .init = drm_gem_shmem_test_init,
        .test_cases = drm_gem_shmem_test_cases
};

kunit_test_suite(drm_gem_shmem_suite);
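
/*
 * The suite can be run with the KUnit wrapper tool, e.g. (assuming a
 * .kunitconfig enabling CONFIG_DRM_KUNIT_TEST is present in the tests
 * directory):
 *
 *   ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/tests
 */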

MODULE_DESCRIPTION("KUnit test suite for GEM objects backed by shmem buffers");
MODULE_LICENSE("GPL");