// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <kunit/test.h>

#include <linux/prime_numbers.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>

#include "../lib/drm_random.h"

static unsigned int random_seed;

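/* Convert a buddy block order into its size in bytes for the given chunk size. */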
static inline u64 get_size(int order, u64 chunk_size)
{
        return (1 << order) * chunk_size;
}

static void drm_test_buddy_alloc_range_bias(struct kunit *test)
{
        u32 mm_size, size, ps, bias_size, bias_start, bias_end, bias_rem;
        DRM_RND_STATE(prng, random_seed);
        unsigned int i, count, *order;
        struct drm_buddy_block *block;
        unsigned long flags;
        struct drm_buddy mm;
        LIST_HEAD(allocated);

        bias_size = SZ_1M;
        ps = roundup_pow_of_two(prandom_u32_state(&prng) % bias_size);
        ps = max(SZ_4K, ps);

        mm_size = (SZ_8M-1) & ~(ps-1); /* Multiple roots */

        kunit_info(test, "mm_size=%u, ps=%u\n", mm_size, ps);

        KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
                               "buddy_init failed\n");

        count = mm_size / bias_size;
        order = drm_random_order(count, &prng);
        KUNIT_EXPECT_TRUE(test, order);
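        /* order[] holds a random permutation of the bias-range indices [0, count). */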

        /*
         * Idea is to split the address space into uniform bias ranges, and then
         * in some random order allocate within each bias, using various
         * patterns within. This should detect if allocations leak out from a
         * given bias, for example.
         */

        for (i = 0; i < count; i++) {
                LIST_HEAD(tmp);

                bias_start = order[i] * bias_size;
                bias_end = bias_start + bias_size;
                bias_rem = bias_size;

                /* internal round_up too big */
                KUNIT_ASSERT_TRUE_MSG(test,
                        drm_buddy_alloc_blocks(&mm, bias_start,
                                bias_end, bias_size + ps, bias_size,
                                &tmp,
                                DRM_BUDDY_RANGE_ALLOCATION),
                        "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
                        bias_start, bias_end, bias_size + ps, bias_size);

                /* size too big */
                KUNIT_ASSERT_TRUE_MSG(test,
                        drm_buddy_alloc_blocks(&mm, bias_start,
                                bias_end, bias_size + ps, ps,
                                &tmp,
                                DRM_BUDDY_RANGE_ALLOCATION),
                        "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
                        bias_start, bias_end, bias_size + ps, ps);

                /* bias range too small for size */
                KUNIT_ASSERT_TRUE_MSG(test,
                        drm_buddy_alloc_blocks(&mm, bias_start + ps,
                                bias_end, bias_size, ps,
                                &tmp,
                                DRM_BUDDY_RANGE_ALLOCATION),
                        "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
                        bias_start + ps, bias_end, bias_size, ps);

                /* bias misaligned */
                KUNIT_ASSERT_TRUE_MSG(test,
                        drm_buddy_alloc_blocks(&mm, bias_start + ps,
                                bias_end - ps,
                                bias_size >> 1, bias_size >> 1,
                                &tmp,
                                DRM_BUDDY_RANGE_ALLOCATION),
                        "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
                        bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1);

                /* single big page */
                KUNIT_ASSERT_FALSE_MSG(test,
                        drm_buddy_alloc_blocks(&mm, bias_start,
                                bias_end, bias_size, bias_size,
                                &tmp,
                                DRM_BUDDY_RANGE_ALLOCATION),
                        "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
                        bias_start, bias_end, bias_size, bias_size);
                drm_buddy_free_list(&mm, &tmp, 0);

                /* single page with internal round_up */
                KUNIT_ASSERT_FALSE_MSG(test,
                        drm_buddy_alloc_blocks(&mm, bias_start,
                                bias_end, ps, bias_size,
                                &tmp,
                                DRM_BUDDY_RANGE_ALLOCATION),
                        "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
                        bias_start, bias_end, ps, bias_size);
                drm_buddy_free_list(&mm, &tmp, 0);

                /* random size within */
                size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
                KUNIT_ASSERT_FALSE_MSG(test,
                        drm_buddy_alloc_blocks(&mm, bias_start,
                                bias_end, size, ps,
                                &tmp,
                                DRM_BUDDY_RANGE_ALLOCATION),
                        "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
                        bias_start, bias_end, size, ps);

                bias_rem -= size;

                /* too big for current avail */
                KUNIT_ASSERT_TRUE_MSG(test,
                        drm_buddy_alloc_blocks(&mm, bias_start,
                                bias_end, bias_rem + ps, ps,
                                &tmp,
                                DRM_BUDDY_RANGE_ALLOCATION),
                        "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
                        bias_start, bias_end, bias_rem + ps, ps);

                if (bias_rem) {
                        /* random fill of the remainder */
                        size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
                        size = max(size, ps);

                        KUNIT_ASSERT_FALSE_MSG(test,
                                drm_buddy_alloc_blocks(&mm, bias_start,
                                        bias_end, size, ps,
                                        &tmp,
                                        DRM_BUDDY_RANGE_ALLOCATION),
                                "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
                                bias_start, bias_end, size, ps);
                        /*
                         * Intentionally allow some space to be left
                         * unallocated, and ideally not always on the bias
                         * boundaries.
                         */
                        drm_buddy_free_list(&mm, &tmp, 0);
                } else {
                        list_splice_tail(&tmp, &allocated);
                }
        }

        kfree(order);
        drm_buddy_free_list(&mm, &allocated, 0);
        drm_buddy_fini(&mm);

        /*
         * Something more free-form. Idea is to pick a random starting bias
         * range within the address space and then start filling it up. Also
         * randomly grow the bias range in both directions as we go along. This
         * should give us bias start/end which is not always uniform like above,
         * and in some cases will require the allocator to jump over already
         * allocated nodes in the middle of the address space.
         */

        KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
                               "buddy_init failed\n");

        bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
        bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
        bias_end = max(bias_end, bias_start + ps);
        bias_rem = bias_end - bias_start;
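
        /* Keep allocating within the bias range, growing it at random, until the whole address space is claimed. */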
        do {
                u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

                KUNIT_ASSERT_FALSE_MSG(test,
                        drm_buddy_alloc_blocks(&mm, bias_start,
                                bias_end, size, ps,
                                &allocated,
                                DRM_BUDDY_RANGE_ALLOCATION),
                        "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
                        bias_start, bias_end, size, ps);
                bias_rem -= size;

                /*
                 * Try to randomly grow the bias range in both directions, or
                 * only one, or perhaps don't grow at all.
                 */
                do {
                        u32 old_bias_start = bias_start;
                        u32 old_bias_end = bias_end;

                        if (bias_start)
                                bias_start -= round_up(prandom_u32_state(&prng) % bias_start, ps);
                        if (bias_end != mm_size)
                                bias_end += round_up(prandom_u32_state(&prng) % (mm_size - bias_end), ps);

                        bias_rem += old_bias_start - bias_start;
                        bias_rem += bias_end - old_bias_end;
                } while (!bias_rem && (bias_start || bias_end != mm_size));
        } while (bias_rem);

        KUNIT_ASSERT_EQ(test, bias_start, 0);
        KUNIT_ASSERT_EQ(test, bias_end, mm_size);
        KUNIT_ASSERT_TRUE_MSG(test,
                drm_buddy_alloc_blocks(&mm, bias_start, bias_end,
                        ps, ps,
                        &allocated,
                        DRM_BUDDY_RANGE_ALLOCATION),
                "buddy_alloc passed with bias(%x-%x), size=%u\n",
                bias_start, bias_end, ps);

        drm_buddy_free_list(&mm, &allocated, 0);
        drm_buddy_fini(&mm);

        /*
         * Allocate cleared blocks in the bias range when the DRM buddy's clear avail is
         * zero. This will validate the bias range allocation in scenarios like system boot
         * when no cleared blocks are available and exercise the fallback path too. The resulting
         * blocks should always be dirty.
         */

        KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
                               "buddy_init failed\n");

        bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
        bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
        bias_end = max(bias_end, bias_start + ps);
        bias_rem = bias_end - bias_start;
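
        /* Nothing has been freed as cleared yet, so clear_avail is zero and the request below must fall back to dirty blocks. */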
        flags = DRM_BUDDY_CLEAR_ALLOCATION | DRM_BUDDY_RANGE_ALLOCATION;
        size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

        KUNIT_ASSERT_FALSE_MSG(test,
                drm_buddy_alloc_blocks(&mm, bias_start,
                        bias_end, size, ps,
                        &allocated,
                        flags),
                "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
                bias_start, bias_end, size, ps);

        list_for_each_entry(block, &allocated, link)
                KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);

        drm_buddy_free_list(&mm, &allocated, 0);
        drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_clear(struct kunit *test)
{
        unsigned long n_pages, total, i = 0;
        DRM_RND_STATE(prng, random_seed);
        const unsigned long ps = SZ_4K;
        struct drm_buddy_block *block;
        const int max_order = 12;
        LIST_HEAD(allocated);
        struct drm_buddy mm;
        unsigned int order;
        u32 mm_size, size;
        LIST_HEAD(dirty);
        LIST_HEAD(clean);

        mm_size = SZ_4K << max_order;
        KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

        KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

        /*
         * Idea is to allocate and free some random portion of the address space,
         * returning those pages as non-dirty and randomly alternate between
         * requesting dirty and non-dirty pages (not going over the limit
         * we freed as non-dirty), putting that into two separate lists.
         * Loop over both lists at the end checking that the dirty list
         * is indeed all dirty pages and vice versa. Free it all again,
         * keeping the dirty/clear status.
         */
        KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                        5 * ps, ps, &allocated,
                        DRM_BUDDY_TOPDOWN_ALLOCATION),
                "buddy_alloc hit an error size=%lu\n", 5 * ps);
        drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
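        /* Freeing with DRM_BUDDY_CLEARED marks these blocks as clean, so clear_avail now covers exactly the five pages returned above. */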

        n_pages = 10;
        do {
                unsigned long flags;
                struct list_head *list;

                if (i % 2 == 0) {
                        list = &dirty;
                        flags = 0;
                } else {
                        list = &clean;
                        flags = DRM_BUDDY_CLEAR_ALLOCATION;
                }

                KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                                ps, ps, list,
                                flags),
                        "buddy_alloc hit an error size=%lu\n", ps);
        } while (++i < n_pages);

        list_for_each_entry(block, &clean, link)
                KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), true);

        list_for_each_entry(block, &dirty, link)
                KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);

        drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);

        /*
         * Trying to go over the clear limit for some allocation.
         * The allocation should never fail with reasonable page-size.
         */
        KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                        10 * ps, ps, &clean,
                        DRM_BUDDY_CLEAR_ALLOCATION),
                "buddy_alloc hit an error size=%lu\n", 10 * ps);

        drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
        drm_buddy_free_list(&mm, &dirty, 0);
        drm_buddy_fini(&mm);

        KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

        /*
         * Create a new mm. Intentionally fragment the address space by creating
         * two alternating lists. Free both lists, one as dirty the other as clean.
         * Try to allocate double the previous size with matching min_page_size. The
         * allocation should never fail as it calls the force_merge. Also check that
         * the page is always dirty after force_merge. Free the page as dirty, then
         * repeat the whole thing, increment the order until we hit the max_order.
         */

        i = 0;
        n_pages = mm_size / ps;
        do {
                struct list_head *list;

                if (i % 2 == 0)
                        list = &dirty;
                else
                        list = &clean;

                KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                                ps, ps, list, 0),
                        "buddy_alloc hit an error size=%lu\n", ps);
        } while (++i < n_pages);

        drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
        drm_buddy_free_list(&mm, &dirty, 0);
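
        /* Each allocation below needs force_merge to combine a clean buddy with a dirty one, so the result must always be dirty. */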
        order = 1;
        do {
                size = SZ_4K << order;

                KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                                size, size, &allocated,
                                DRM_BUDDY_CLEAR_ALLOCATION),
                        "buddy_alloc hit an error size=%u\n", size);

                total = 0;
                list_for_each_entry(block, &allocated, link) {
                        KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
                        total += drm_buddy_block_size(&mm, block);
                }
                KUNIT_EXPECT_EQ(test, total, size);

                drm_buddy_free_list(&mm, &allocated, 0);
        } while (++order <= max_order);

        drm_buddy_fini(&mm);

        /*
         * Create a new mm with a non power-of-two size. Allocate a random size, free as
         * cleared and then call fini. This will ensure the multi-root force merge during
         * fini.
         */
        mm_size = 12 * SZ_4K;
        size = max(round_up(prandom_u32_state(&prng) % mm_size, ps), ps);
        KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
        KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                        size, ps, &allocated,
                        DRM_BUDDY_TOPDOWN_ALLOCATION),
                "buddy_alloc hit an error size=%u\n", size);
        drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
        drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_contiguous(struct kunit *test)
{
        const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
        unsigned long i, n_pages, total;
        struct drm_buddy_block *block;
        struct drm_buddy mm;
        LIST_HEAD(left);
        LIST_HEAD(middle);
        LIST_HEAD(right);
        LIST_HEAD(allocated);

        KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

        /*
         * Idea is to fragment the address space by alternating block
         * allocations between three different lists; one for left, middle and
         * right. We can then free a list to simulate fragmentation. In
         * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION,
         * including the try_harder path.
         */

        i = 0;
        n_pages = mm_size / ps;
        do {
                struct list_head *list;
                int slot = i % 3;

                if (slot == 0)
                        list = &left;
                else if (slot == 1)
                        list = &middle;
                else
                        list = &right;

                KUNIT_ASSERT_FALSE_MSG(test,
                        drm_buddy_alloc_blocks(&mm, 0, mm_size,
                                ps, ps, list, 0),
                        "buddy_alloc hit an error size=%lu\n",
                        ps);
        } while (++i < n_pages);
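
        /* Fully allocated: any contiguous request must fail at this point. */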
        KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                        3 * ps, ps, &allocated,
                        DRM_BUDDY_CONTIGUOUS_ALLOCATION),
                "buddy_alloc didn't error size=%lu\n", 3 * ps);

        drm_buddy_free_list(&mm, &middle, 0);
        KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                        3 * ps, ps, &allocated,
                        DRM_BUDDY_CONTIGUOUS_ALLOCATION),
                "buddy_alloc didn't error size=%lu\n", 3 * ps);
        KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                        2 * ps, ps, &allocated,
                        DRM_BUDDY_CONTIGUOUS_ALLOCATION),
                "buddy_alloc didn't error size=%lu\n", 2 * ps);

        drm_buddy_free_list(&mm, &right, 0);
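        /* Each free run is still at most two pages (the middle and right page of a group), so three contiguous pages can't be found. */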
        KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                        3 * ps, ps, &allocated,
                        DRM_BUDDY_CONTIGUOUS_ALLOCATION),
                "buddy_alloc didn't error size=%lu\n", 3 * ps);

        /*
         * At this point we should have enough contiguous space for 2 blocks,
         * however they are never buddies (since we freed middle and right) so
         * will require the try_harder logic to find them.
         */
        KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                        2 * ps, ps, &allocated,
                        DRM_BUDDY_CONTIGUOUS_ALLOCATION),
                "buddy_alloc hit an error size=%lu\n", 2 * ps);

        drm_buddy_free_list(&mm, &left, 0);
        KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                        3 * ps, ps, &allocated,
                        DRM_BUDDY_CONTIGUOUS_ALLOCATION),
                "buddy_alloc hit an error size=%lu\n", 3 * ps);

        total = 0;
        list_for_each_entry(block, &allocated, link)
                total += drm_buddy_block_size(&mm, block);

        KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3);

        drm_buddy_free_list(&mm, &allocated, 0);
        drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_pathological(struct kunit *test)
{
        u64 mm_size, size, start = 0;
        struct drm_buddy_block *block;
        const int max_order = 3;
        unsigned long flags = 0;
        int order, top;
        struct drm_buddy mm;
        LIST_HEAD(blocks);
        LIST_HEAD(holes);
        LIST_HEAD(tmp);

        /*
         * Create a pot-sized mm, then allocate one of each possible
         * order within. This should leave the mm with exactly one
         * page left. Free the largest block, then whittle down again.
         * Eventually we will have a fully 50% fragmented mm.
         */

        mm_size = SZ_4K << max_order;
        KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
                               "buddy_init failed\n");

        KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

        for (top = max_order; top; top--) {
                /* Make room by freeing the largest allocated block */
                block = list_first_entry_or_null(&blocks, typeof(*block), link);
                if (block) {
                        list_del(&block->link);
                        drm_buddy_free_block(&mm, block);
                }
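
                /* Allocate one block of every order below the current top. */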
                for (order = top; order--;) {
                        size = get_size(order, mm.chunk_size);
                        KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
                                        mm_size, size, size,
                                        &tmp, flags),
                                "buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
                                order, top);

                        block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
                        KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

                        list_move_tail(&block->link, &blocks);
                }

                /* There should be one final page for this sub-allocation */
                size = get_size(0, mm.chunk_size);
                KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                size, size, &tmp, flags),
                        "buddy_alloc hit -ENOMEM for hole\n");

                block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
                KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

                list_move_tail(&block->link, &holes);

                size = get_size(top, mm.chunk_size);
                KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                size, size, &tmp, flags),
                        "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
                        top, max_order);
        }

        drm_buddy_free_list(&mm, &holes, 0);

        /* Nothing larger than blocks of chunk_size now available */
        for (order = 1; order <= max_order; order++) {
                size = get_size(order, mm.chunk_size);
                KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                size, size, &tmp, flags),
                        "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
                        order);
        }

        list_splice_tail(&holes, &blocks);
        drm_buddy_free_list(&mm, &blocks, 0);
        drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
{
        u64 mm_size, size, start = 0;
        struct drm_buddy_block *block, *bn;
        const unsigned int max_order = 16;
        unsigned long flags = 0;
        struct drm_buddy mm;
        unsigned int order;
        LIST_HEAD(blocks);
        LIST_HEAD(tmp);

        /*
         * Create a pot-sized mm, then allocate one of each possible
         * order within. This should leave the mm with exactly one
         * page left.
         */

        mm_size = SZ_4K << max_order;
        KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
                               "buddy_init failed\n");

        KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

        for (order = 0; order < max_order; order++) {
                size = get_size(order, mm.chunk_size);
                KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                size, size, &tmp, flags),
                        "buddy_alloc hit -ENOMEM with order=%d\n",
                        order);

                block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
                KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

                list_move_tail(&block->link, &blocks);
        }

        /* And now the last remaining block available */
        size = get_size(0, mm.chunk_size);
        KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                        size, size, &tmp, flags),
                "buddy_alloc hit -ENOMEM on final alloc\n");

        block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
        KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

        list_move_tail(&block->link, &blocks);

        /* Should be completely full! */
        for (order = max_order; order--;) {
                size = get_size(order, mm.chunk_size);
                KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                size, size, &tmp, flags),
                        "buddy_alloc unexpectedly succeeded, it should be full!");
        }

        block = list_last_entry(&blocks, typeof(*block), link);
        list_del(&block->link);
        drm_buddy_free_block(&mm, block);

        /* As we free in increasing size, we make available larger blocks */
        order = 1;
        list_for_each_entry_safe(block, bn, &blocks, link) {
                list_del(&block->link);
                drm_buddy_free_block(&mm, block);

                size = get_size(order, mm.chunk_size);
                KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                size, size, &tmp, flags),
                        "buddy_alloc hit -ENOMEM with order=%d\n",
                        order);

                block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
                KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

                list_del(&block->link);
                drm_buddy_free_block(&mm, block);
                order++;
        }

        /* To confirm, now the whole mm should be available */
        size = get_size(max_order, mm.chunk_size);
        KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                        size, size, &tmp, flags),
                "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
                max_order);

        block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
        KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

        list_del(&block->link);
        drm_buddy_free_block(&mm, block);
        drm_buddy_free_list(&mm, &blocks, 0);
        drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_optimistic(struct kunit *test)
{
        u64 mm_size, size, start = 0;
        struct drm_buddy_block *block;
        unsigned long flags = 0;
        const int max_order = 16;
        struct drm_buddy mm;
        LIST_HEAD(blocks);
        LIST_HEAD(tmp);
        int order;

        /*
         * Create a mm with one block of each order available, and
         * try to allocate them all.
         */

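        /* Sum of one block of each order 0..max_order: (2^(max_order + 1) - 1) pages. */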
        mm_size = SZ_4K * ((1 << (max_order + 1)) - 1);

        KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
                               "buddy_init failed\n");

        KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

        for (order = 0; order <= max_order; order++) {
                size = get_size(order, mm.chunk_size);
                KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                size, size, &tmp, flags),
                        "buddy_alloc hit -ENOMEM with order=%d\n",
                        order);

                block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
                KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

                list_move_tail(&block->link, &blocks);
        }

        /* Should be completely full! */
        size = get_size(0, mm.chunk_size);
        KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                        size, size, &tmp, flags),
                "buddy_alloc unexpectedly succeeded, it should be full!");

        drm_buddy_free_list(&mm, &blocks, 0);
        drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_limit(struct kunit *test)
{
        u64 size = U64_MAX, start = 0;
        struct drm_buddy_block *block;
        unsigned long flags = 0;
        LIST_HEAD(allocated);
        struct drm_buddy mm;

        KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, SZ_4K));

        KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
                "mm.max_order(%d) != %d\n", mm.max_order,
                DRM_BUDDY_MAX_ORDER);
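
        /* The largest block the mm can hand out is chunk_size << max_order; allocate exactly that. */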
        size = mm.chunk_size << mm.max_order;
        KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
                        mm.chunk_size, &allocated, flags));

        block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
        KUNIT_EXPECT_TRUE(test, block);

        KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order,
                "block order(%d) != %d\n",
                drm_buddy_block_order(block), mm.max_order);

        KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
                BIT_ULL(mm.max_order) * mm.chunk_size,
                "block size(%llu) != %llu\n",
                drm_buddy_block_size(&mm, block),
                BIT_ULL(mm.max_order) * mm.chunk_size);

        drm_buddy_free_list(&mm, &allocated, 0);
        drm_buddy_fini(&mm);
}

static int drm_buddy_suite_init(struct kunit_suite *suite)
{
        while (!random_seed)
                random_seed = get_random_u32();

        kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n",
                   random_seed);

        return 0;
}

static struct kunit_case drm_buddy_tests[] = {
        KUNIT_CASE(drm_test_buddy_alloc_limit),
        KUNIT_CASE(drm_test_buddy_alloc_optimistic),
        KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
        KUNIT_CASE(drm_test_buddy_alloc_pathological),
        KUNIT_CASE(drm_test_buddy_alloc_contiguous),
        KUNIT_CASE(drm_test_buddy_alloc_clear),
        KUNIT_CASE(drm_test_buddy_alloc_range_bias),
        {}
};

static struct kunit_suite drm_buddy_test_suite = {
        .name = "drm_buddy",
        .suite_init = drm_buddy_suite_init,
        .test_cases = drm_buddy_tests,
};

kunit_test_suite(drm_buddy_test_suite);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Kunit test for drm_buddy functions");
MODULE_LICENSE("GPL");