// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
 * Copyright © 2023 Intel Corporation
 */
#include <linux/dma-resv.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include <linux/ww_mutex.h>

#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_kunit_helpers.h"

#define BO_SIZE         SZ_8K

struct ttm_bo_test_case {
        const char *description;
        bool interruptible;
        bool no_wait;
};

static const struct ttm_bo_test_case ttm_bo_reserved_cases[] = {
        {
                .description = "Cannot be interrupted and sleeps",
                .interruptible = false,
                .no_wait = false,
        },
        {
                .description = "Cannot be interrupted, locks straight away",
                .interruptible = false,
                .no_wait = true,
        },
        {
                .description = "Can be interrupted, sleeps",
                .interruptible = true,
                .no_wait = false,
        },
};

static void ttm_bo_init_case_desc(const struct ttm_bo_test_case *t,
                                  char *desc)
{
        strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
}

KUNIT_ARRAY_PARAM(ttm_bo_reserve, ttm_bo_reserved_cases, ttm_bo_init_case_desc);

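/* Reserving an unlocked BO without a ticket succeeds for every parameter combination */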
static void ttm_bo_reserve_optimistic_no_ticket(struct kunit *test)
{
        const struct ttm_bo_test_case *params = test->param_value;
        struct ttm_buffer_object *bo;
        int err;

        bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

        err = ttm_bo_reserve(bo, params->interruptible, params->no_wait, NULL);
        KUNIT_ASSERT_EQ(test, err, 0);

        dma_resv_unlock(bo->base.resv);
}

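/* Reserving an already locked BO with no_wait set fails with -EBUSY instead of blocking */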
static void ttm_bo_reserve_locked_no_sleep(struct kunit *test)
{
        struct ttm_buffer_object *bo;
        bool interruptible = false;
        bool no_wait = true;
        int err;

        bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

        /* Let's lock it beforehand */
        dma_resv_lock(bo->base.resv, NULL);

        err = ttm_bo_reserve(bo, interruptible, no_wait, NULL);
        dma_resv_unlock(bo->base.resv);

        KUNIT_ASSERT_EQ(test, err, -EBUSY);
}

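/* A ticket cannot be combined with no_wait; the reservation fails with -EBUSY even though the BO is unlocked */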
static void ttm_bo_reserve_no_wait_ticket(struct kunit *test)
{
        struct ttm_buffer_object *bo;
        struct ww_acquire_ctx ctx;
        bool interruptible = false;
        bool no_wait = true;
        int err;

        ww_acquire_init(&ctx, &reservation_ww_class);

        bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

        err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
        KUNIT_ASSERT_EQ(test, err, -EBUSY);

        ww_acquire_fini(&ctx);
}

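/* Reserving the same BO twice with the same ticket is reported as -EALREADY */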
static void ttm_bo_reserve_double_resv(struct kunit *test)
{
        struct ttm_buffer_object *bo;
        struct ww_acquire_ctx ctx;
        bool interruptible = false;
        bool no_wait = false;
        int err;

        ww_acquire_init(&ctx, &reservation_ww_class);

        bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

        err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
        KUNIT_ASSERT_EQ(test, err, 0);

        err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);

        dma_resv_unlock(bo->base.resv);
        ww_acquire_fini(&ctx);

        KUNIT_ASSERT_EQ(test, err, -EALREADY);
}

/*
 * A test case heavily inspired by ww_test_edeadlk_normal(). It injects
 * a deadlock by manipulating the sequence number of the context that holds
 * the dma_resv lock of bo2, so the context attempting the second reservation
 * is treated as the younger one and has to back off (indicated by -EDEADLK).
 * The subtest checks that ttm_bo_reserve() properly propagates that error.
 */
static void ttm_bo_reserve_deadlock(struct kunit *test)
{
        struct ttm_buffer_object *bo1, *bo2;
        struct ww_acquire_ctx ctx1, ctx2;
        bool interruptible = false;
        bool no_wait = false;
        int err;

        bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
        bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

        ww_acquire_init(&ctx1, &reservation_ww_class);
        mutex_lock(&bo2->base.resv->lock.base);

        /* The deadlock will be caught by WW mutex, don't warn about it */
        lock_release(&bo2->base.resv->lock.base.dep_map, 1);

        bo2->base.resv->lock.ctx = &ctx2;
        ctx2 = ctx1;
        ctx2.stamp--; /* Make the context holding the lock appear older */

        err = ttm_bo_reserve(bo1, interruptible, no_wait, &ctx1);
        KUNIT_ASSERT_EQ(test, err, 0);

        err = ttm_bo_reserve(bo2, interruptible, no_wait, &ctx1);
        KUNIT_ASSERT_EQ(test, err, -EDEADLK);

        dma_resv_unlock(bo1->base.resv);
        ww_acquire_fini(&ctx1);
}

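/*
 * The helpers below interrupt a pending reservation by sending SIGTERM to
 * the reserving task from a timer via do_send_sig_info(); this part is
 * compiled only when the test suite is built into the kernel (see the
 * IS_BUILTIN guard).
 */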
#if IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST)
struct signal_timer {
        struct timer_list timer;
        struct ww_acquire_ctx *ctx;
};

static void signal_for_ttm_bo_reserve(struct timer_list *t)
{
        struct signal_timer *s_timer = from_timer(s_timer, t, timer);
        struct task_struct *task = s_timer->ctx->task;

        do_send_sig_info(SIGTERM, SEND_SIG_PRIV, task, PIDTYPE_PID);
}

static int threaded_ttm_bo_reserve(void *arg)
{
        struct ttm_buffer_object *bo = arg;
        struct signal_timer s_timer;
        struct ww_acquire_ctx ctx;
        bool interruptible = true;
        bool no_wait = false;
        int err;

        ww_acquire_init(&ctx, &reservation_ww_class);

        /* Prepare a signal that will interrupt the reservation attempt */
        timer_setup_on_stack(&s_timer.timer, &signal_for_ttm_bo_reserve, 0);
        s_timer.ctx = &ctx;

        mod_timer(&s_timer.timer, jiffies + msecs_to_jiffies(100));

        err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);

        timer_delete_sync(&s_timer.timer);
        destroy_timer_on_stack(&s_timer.timer);

        ww_acquire_fini(&ctx);

        return err;
}

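/* An interruptible reservation that is hit by a signal must bail out with -ERESTARTSYS */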
static void ttm_bo_reserve_interrupted(struct kunit *test)
{
        struct ttm_buffer_object *bo;
        struct task_struct *task;
        int err;

        bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

        task = kthread_create(threaded_ttm_bo_reserve, bo, "ttm-bo-reserve");

        if (IS_ERR(task))
                KUNIT_FAIL(test, "Couldn't create ttm bo reserve task\n");

        /* Take a lock so the threaded reserve has to wait */
        mutex_lock(&bo->base.resv->lock.base);

        wake_up_process(task);
        msleep(20);
        err = kthread_stop(task);

        mutex_unlock(&bo->base.resv->lock.base);

        KUNIT_ASSERT_EQ(test, err, -ERESTARTSYS);
}
#endif /* IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST) */

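/* After unreserving, the BO's resource should sit at the tail of its manager's LRU list for the BO's priority */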
static void ttm_bo_unreserve_basic(struct kunit *test)
{
        struct ttm_test_devices *priv = test->priv;
        struct ttm_buffer_object *bo;
        struct ttm_device *ttm_dev;
        struct ttm_resource *res1, *res2;
        struct ttm_place *place;
        struct ttm_resource_manager *man;
        unsigned int bo_prio = TTM_MAX_BO_PRIORITY - 1;
        uint32_t mem_type = TTM_PL_SYSTEM;
        int err;

        place = ttm_place_kunit_init(test, mem_type, 0);

        ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

        err = ttm_device_kunit_init(priv, ttm_dev, false, false);
        KUNIT_ASSERT_EQ(test, err, 0);
        priv->ttm_dev = ttm_dev;

        bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
        bo->priority = bo_prio;

        err = ttm_resource_alloc(bo, place, &res1);
        KUNIT_ASSERT_EQ(test, err, 0);

        bo->resource = res1;

        /* Add a dummy resource to populate the LRU */
        err = ttm_resource_alloc(bo, place, &res2);
        KUNIT_ASSERT_EQ(test, err, 0);

        dma_resv_lock(bo->base.resv, NULL);
        ttm_bo_unreserve(bo);

        man = ttm_manager_type(priv->ttm_dev, mem_type);
        KUNIT_ASSERT_EQ(test,
                        list_is_last(&res1->lru, &man->lru[bo->priority]), 1);

        ttm_resource_free(bo, &res2);
        ttm_resource_free(bo, &res1);
}

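/* Resources of a pinned BO stay on the device's pinned list when the BO is unreserved */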
static void ttm_bo_unreserve_pinned(struct kunit *test)
{
        struct ttm_test_devices *priv = test->priv;
        struct ttm_buffer_object *bo;
        struct ttm_device *ttm_dev;
        struct ttm_resource *res1, *res2;
        struct ttm_place *place;
        uint32_t mem_type = TTM_PL_SYSTEM;
        int err;

        ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

        err = ttm_device_kunit_init(priv, ttm_dev, false, false);
        KUNIT_ASSERT_EQ(test, err, 0);
        priv->ttm_dev = ttm_dev;

        bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
        place = ttm_place_kunit_init(test, mem_type, 0);

        dma_resv_lock(bo->base.resv, NULL);
        ttm_bo_pin(bo);

        err = ttm_resource_alloc(bo, place, &res1);
        KUNIT_ASSERT_EQ(test, err, 0);
        bo->resource = res1;

        /* Add a dummy resource to the pinned list */
        err = ttm_resource_alloc(bo, place, &res2);
        KUNIT_ASSERT_EQ(test, err, 0);
        KUNIT_ASSERT_EQ(test,
                        list_is_last(&res2->lru, &priv->ttm_dev->pinned), 1);

        ttm_bo_unreserve(bo);
        KUNIT_ASSERT_EQ(test,
                        list_is_last(&res1->lru, &priv->ttm_dev->pinned), 1);

        ttm_resource_free(bo, &res1);
        ttm_resource_free(bo, &res2);
}

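/* With a bulk move set up, unreserving should leave the BO's resource as the last entry of the matching bulk move position */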
static void ttm_bo_unreserve_bulk(struct kunit *test)
{
        struct ttm_test_devices *priv = test->priv;
        struct ttm_lru_bulk_move lru_bulk_move;
        struct ttm_lru_bulk_move_pos *pos;
        struct ttm_buffer_object *bo1, *bo2;
        struct ttm_resource *res1, *res2;
        struct ttm_device *ttm_dev;
        struct ttm_place *place;
        uint32_t mem_type = TTM_PL_SYSTEM;
        unsigned int bo_priority = 0;
        int err;

        ttm_lru_bulk_move_init(&lru_bulk_move);

        place = ttm_place_kunit_init(test, mem_type, 0);

        ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

        err = ttm_device_kunit_init(priv, ttm_dev, false, false);
        KUNIT_ASSERT_EQ(test, err, 0);
        priv->ttm_dev = ttm_dev;

        bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
        bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

        dma_resv_lock(bo1->base.resv, NULL);
        ttm_bo_set_bulk_move(bo1, &lru_bulk_move);
        dma_resv_unlock(bo1->base.resv);

        err = ttm_resource_alloc(bo1, place, &res1);
        KUNIT_ASSERT_EQ(test, err, 0);
        bo1->resource = res1;

        dma_resv_lock(bo2->base.resv, NULL);
        ttm_bo_set_bulk_move(bo2, &lru_bulk_move);
        dma_resv_unlock(bo2->base.resv);

        err = ttm_resource_alloc(bo2, place, &res2);
        KUNIT_ASSERT_EQ(test, err, 0);
        bo2->resource = res2;

        ttm_bo_reserve(bo1, false, false, NULL);
        ttm_bo_unreserve(bo1);

        pos = &lru_bulk_move.pos[mem_type][bo_priority];
        KUNIT_ASSERT_PTR_EQ(test, res1, pos->last);

        ttm_resource_free(bo1, &res1);
        ttm_resource_free(bo2, &res2);
}

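/* Final put of a device BO with an allocated resource and TT */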
static void ttm_bo_put_basic(struct kunit *test)
{
        struct ttm_test_devices *priv = test->priv;
        struct ttm_buffer_object *bo;
        struct ttm_resource *res;
        struct ttm_device *ttm_dev;
        struct ttm_place *place;
        uint32_t mem_type = TTM_PL_SYSTEM;
        int err;

        place = ttm_place_kunit_init(test, mem_type, 0);

        ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

        err = ttm_device_kunit_init(priv, ttm_dev, false, false);
        KUNIT_ASSERT_EQ(test, err, 0);
        priv->ttm_dev = ttm_dev;

        bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
        bo->type = ttm_bo_type_device;

        err = ttm_resource_alloc(bo, place, &res);
        KUNIT_ASSERT_EQ(test, err, 0);
        bo->resource = res;

        dma_resv_lock(bo->base.resv, NULL);
        err = ttm_tt_create(bo, false);
        dma_resv_unlock(bo->base.resv);
        KUNIT_EXPECT_EQ(test, err, 0);

        ttm_bo_put(bo);
}

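/* Minimal fence implementation used by ttm_bo_put_shared_resv() */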
static const char *mock_name(struct dma_fence *f)
{
        return "kunit-ttm-bo-put";
}

static const struct dma_fence_ops mock_fence_ops = {
        .get_driver_name = mock_name,
        .get_timeline_name = mock_name,
};

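/* Put a BO that uses an externally provided reservation object with a signalled fence attached */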
static void ttm_bo_put_shared_resv(struct kunit *test)
{
        struct ttm_test_devices *priv = test->priv;
        struct ttm_buffer_object *bo;
        struct dma_resv *external_resv;
        struct dma_fence *fence;
        /* A dummy DMA fence lock */
        spinlock_t fence_lock;
        struct ttm_device *ttm_dev;
        int err;

        ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

        err = ttm_device_kunit_init(priv, ttm_dev, false, false);
        KUNIT_ASSERT_EQ(test, err, 0);
        priv->ttm_dev = ttm_dev;

        external_resv = kunit_kzalloc(test, sizeof(*external_resv), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, external_resv);

        dma_resv_init(external_resv);

        fence = kunit_kzalloc(test, sizeof(*fence), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, fence);

        spin_lock_init(&fence_lock);
        dma_fence_init(fence, &mock_fence_ops, &fence_lock, 0, 0);

        dma_resv_lock(external_resv, NULL);
        dma_resv_reserve_fences(external_resv, 1);
        dma_resv_add_fence(external_resv, fence, DMA_RESV_USAGE_BOOKKEEP);
        dma_resv_unlock(external_resv);

        dma_fence_signal(fence);

        bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
        bo->type = ttm_bo_type_device;
        bo->base.resv = external_resv;

        ttm_bo_put(bo);
}

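/* Each ttm_bo_pin() call bumps pin_count by one */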
static void ttm_bo_pin_basic(struct kunit *test)
{
        struct ttm_test_devices *priv = test->priv;
        struct ttm_buffer_object *bo;
        struct ttm_device *ttm_dev;
        unsigned int no_pins = 3;
        int err;

        ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

        err = ttm_device_kunit_init(priv, ttm_dev, false, false);
        KUNIT_ASSERT_EQ(test, err, 0);
        priv->ttm_dev = ttm_dev;

        bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

        for (int i = 0; i < no_pins; i++) {
                dma_resv_lock(bo->base.resv, NULL);
                ttm_bo_pin(bo);
                dma_resv_unlock(bo->base.resv);
        }

        KUNIT_ASSERT_EQ(test, bo->pin_count, no_pins);
}

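/* Pinning takes the resource off the bulk move; unpinning puts it back */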
static void ttm_bo_pin_unpin_resource(struct kunit *test)
{
        struct ttm_test_devices *priv = test->priv;
        struct ttm_lru_bulk_move lru_bulk_move;
        struct ttm_lru_bulk_move_pos *pos;
        struct ttm_buffer_object *bo;
        struct ttm_resource *res;
        struct ttm_device *ttm_dev;
        struct ttm_place *place;
        uint32_t mem_type = TTM_PL_SYSTEM;
        unsigned int bo_priority = 0;
        int err;

        ttm_lru_bulk_move_init(&lru_bulk_move);

        place = ttm_place_kunit_init(test, mem_type, 0);

        ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

        err = ttm_device_kunit_init(priv, ttm_dev, false, false);
        KUNIT_ASSERT_EQ(test, err, 0);
        priv->ttm_dev = ttm_dev;

        bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

        err = ttm_resource_alloc(bo, place, &res);
        KUNIT_ASSERT_EQ(test, err, 0);
        bo->resource = res;

        dma_resv_lock(bo->base.resv, NULL);
        ttm_bo_set_bulk_move(bo, &lru_bulk_move);
        ttm_bo_pin(bo);
        dma_resv_unlock(bo->base.resv);

        pos = &lru_bulk_move.pos[mem_type][bo_priority];

        KUNIT_ASSERT_EQ(test, bo->pin_count, 1);
        KUNIT_ASSERT_NULL(test, pos->first);
        KUNIT_ASSERT_NULL(test, pos->last);

        dma_resv_lock(bo->base.resv, NULL);
        ttm_bo_unpin(bo);
        dma_resv_unlock(bo->base.resv);

        KUNIT_ASSERT_PTR_EQ(test, res, pos->last);
        KUNIT_ASSERT_EQ(test, bo->pin_count, 0);

        ttm_resource_free(bo, &res);
}

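/* With more than one pin held, a single unpin must not return the resource to the bulk move */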
static void ttm_bo_multiple_pin_one_unpin(struct kunit *test)
{
        struct ttm_test_devices *priv = test->priv;
        struct ttm_lru_bulk_move lru_bulk_move;
        struct ttm_lru_bulk_move_pos *pos;
        struct ttm_buffer_object *bo;
        struct ttm_resource *res;
        struct ttm_device *ttm_dev;
        struct ttm_place *place;
        uint32_t mem_type = TTM_PL_SYSTEM;
        unsigned int bo_priority = 0;
        int err;

        ttm_lru_bulk_move_init(&lru_bulk_move);

        place = ttm_place_kunit_init(test, mem_type, 0);

        ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

        err = ttm_device_kunit_init(priv, ttm_dev, false, false);
        KUNIT_ASSERT_EQ(test, err, 0);
        priv->ttm_dev = ttm_dev;

        bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

        err = ttm_resource_alloc(bo, place, &res);
        KUNIT_ASSERT_EQ(test, err, 0);
        bo->resource = res;

        dma_resv_lock(bo->base.resv, NULL);
        ttm_bo_set_bulk_move(bo, &lru_bulk_move);

        /* Multiple pins */
        ttm_bo_pin(bo);
        ttm_bo_pin(bo);

        dma_resv_unlock(bo->base.resv);

        pos = &lru_bulk_move.pos[mem_type][bo_priority];

        KUNIT_ASSERT_EQ(test, bo->pin_count, 2);
        KUNIT_ASSERT_NULL(test, pos->first);
        KUNIT_ASSERT_NULL(test, pos->last);

        dma_resv_lock(bo->base.resv, NULL);
        ttm_bo_unpin(bo);
        dma_resv_unlock(bo->base.resv);

        KUNIT_ASSERT_EQ(test, bo->pin_count, 1);
        KUNIT_ASSERT_NULL(test, pos->first);
        KUNIT_ASSERT_NULL(test, pos->last);

        dma_resv_lock(bo->base.resv, NULL);
        ttm_bo_unpin(bo);
        dma_resv_unlock(bo->base.resv);

        ttm_resource_free(bo, &res);
}

static struct kunit_case ttm_bo_test_cases[] = {
        KUNIT_CASE_PARAM(ttm_bo_reserve_optimistic_no_ticket,
                         ttm_bo_reserve_gen_params),
        KUNIT_CASE(ttm_bo_reserve_locked_no_sleep),
        KUNIT_CASE(ttm_bo_reserve_no_wait_ticket),
        KUNIT_CASE(ttm_bo_reserve_double_resv),
#if IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST)
        KUNIT_CASE(ttm_bo_reserve_interrupted),
#endif
        KUNIT_CASE(ttm_bo_reserve_deadlock),
        KUNIT_CASE(ttm_bo_unreserve_basic),
        KUNIT_CASE(ttm_bo_unreserve_pinned),
        KUNIT_CASE(ttm_bo_unreserve_bulk),
        KUNIT_CASE(ttm_bo_put_basic),
        KUNIT_CASE(ttm_bo_put_shared_resv),
        KUNIT_CASE(ttm_bo_pin_basic),
        KUNIT_CASE(ttm_bo_pin_unpin_resource),
        KUNIT_CASE(ttm_bo_multiple_pin_one_unpin),
        {}
};

static struct kunit_suite ttm_bo_test_suite = {
        .name = "ttm_bo",
        .init = ttm_test_devices_init,
        .exit = ttm_test_devices_fini,
        .test_cases = ttm_bo_test_cases,
};

kunit_test_suites(&ttm_bo_test_suite);

MODULE_LICENSE("GPL");