// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>

static struct kmem_cache *slab_blocks;

static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
                                               struct drm_buddy_block *parent,
                                               unsigned int order,
                                               u64 offset)
{
        struct drm_buddy_block *block;

        BUG_ON(order > DRM_BUDDY_MAX_ORDER);

        block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
        if (!block)
                return NULL;

        block->header = offset;
        block->header |= order;
        block->parent = parent;

        BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
        return block;
}

static void drm_block_free(struct drm_buddy *mm,
                           struct drm_buddy_block *block)
{
        kmem_cache_free(slab_blocks, block);
}

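/*
 * Each order has its own free list, kept sorted by block offset, so that
 * callers scanning from the list tail (see get_maxblock() for
 * DRM_BUDDY_TOPDOWN_ALLOCATION) see the highest offsets first.
 */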
static void list_insert_sorted(struct drm_buddy *mm,
                               struct drm_buddy_block *block)
{
        struct drm_buddy_block *node;
        struct list_head *head;

        head = &mm->free_list[drm_buddy_block_order(block)];
        if (list_empty(head)) {
                list_add(&block->link, head);
                return;
        }

        list_for_each_entry(node, head, link)
                if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
                        break;

        __list_add(&block->link, node->link.prev, &node->link);
}

static void mark_allocated(struct drm_buddy_block *block)
{
        block->header &= ~DRM_BUDDY_HEADER_STATE;
        block->header |= DRM_BUDDY_ALLOCATED;

        list_del(&block->link);
}

static void mark_free(struct drm_buddy *mm,
                      struct drm_buddy_block *block)
{
        block->header &= ~DRM_BUDDY_HEADER_STATE;
        block->header |= DRM_BUDDY_FREE;

        list_insert_sorted(mm, block);
}

static void mark_split(struct drm_buddy_block *block)
{
        block->header &= ~DRM_BUDDY_HEADER_STATE;
        block->header |= DRM_BUDDY_SPLIT;

        list_del(&block->link);
}

/**
 * drm_buddy_init - init memory manager
 *
 * @mm: DRM buddy manager to initialize
 * @size: size in bytes to manage
 * @chunk_size: minimum page size in bytes for our allocations
 *
 * Initializes the memory manager and its resources.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
        unsigned int i;
        u64 offset;

        if (size < chunk_size)
                return -EINVAL;

        if (chunk_size < PAGE_SIZE)
                return -EINVAL;

        if (!is_power_of_2(chunk_size))
                return -EINVAL;

        size = round_down(size, chunk_size);

        mm->size = size;
        mm->avail = size;
        mm->chunk_size = chunk_size;
        mm->max_order = ilog2(size) - ilog2(chunk_size);

        BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);

        mm->free_list = kmalloc_array(mm->max_order + 1,
                                      sizeof(struct list_head),
                                      GFP_KERNEL);
        if (!mm->free_list)
                return -ENOMEM;

        for (i = 0; i <= mm->max_order; ++i)
                INIT_LIST_HEAD(&mm->free_list[i]);

        mm->n_roots = hweight64(size);

        mm->roots = kmalloc_array(mm->n_roots,
                                  sizeof(struct drm_buddy_block *),
                                  GFP_KERNEL);
        if (!mm->roots)
                goto out_free_list;

        offset = 0;
        i = 0;

        /*
         * Split into power-of-two blocks, in case we are given a size that is
         * not itself a power-of-two.
         */
        do {
                struct drm_buddy_block *root;
                unsigned int order;
                u64 root_size;

                order = ilog2(size) - ilog2(chunk_size);
                root_size = chunk_size << order;

                root = drm_block_alloc(mm, NULL, order, offset);
                if (!root)
                        goto out_free_roots;

                mark_free(mm, root);

                BUG_ON(i > mm->max_order);
                BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);

                mm->roots[i] = root;

                offset += root_size;
                size -= root_size;
                i++;
        } while (size);

        return 0;

out_free_roots:
        while (i--)
                drm_block_free(mm, mm->roots[i]);
        kfree(mm->roots);
out_free_list:
        kfree(mm->free_list);
        return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);

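/*
 * Usage sketch (illustrative, not part of the upstream file): a size that
 * is not a power of two is carved into power-of-two roots. For example,
 * managing 768 MiB with a 4 KiB chunk size produces two roots, a 512 MiB
 * block of order 17 and a 256 MiB block of order 16:
 *
 *      struct drm_buddy mm;
 *      int err;
 *
 *      err = drm_buddy_init(&mm, SZ_512M + SZ_256M, SZ_4K);
 *      if (err)
 *              return err;
 *      // mm.n_roots == hweight64(SZ_512M + SZ_256M) == 2, mm.max_order == 17
 */
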
/**
 * drm_buddy_fini - tear down the memory manager
 *
 * @mm: DRM buddy manager to free
 *
 * Clean up memory manager resources and the freelist.
 */
void drm_buddy_fini(struct drm_buddy *mm)
{
        int i;

        for (i = 0; i < mm->n_roots; ++i) {
                WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
                drm_block_free(mm, mm->roots[i]);
        }

        WARN_ON(mm->avail != mm->size);

        kfree(mm->roots);
        kfree(mm->free_list);
}
EXPORT_SYMBOL(drm_buddy_fini);

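/*
 * Minimal lifecycle sketch (illustrative; error handling elided, and the
 * caller is assumed to provide its own locking, since the buddy manager
 * does not lock internally):
 *
 *      struct drm_buddy mm;
 *      LIST_HEAD(blocks);
 *
 *      drm_buddy_init(&mm, SZ_1G, SZ_4K);
 *      drm_buddy_alloc_blocks(&mm, 0, SZ_1G, SZ_8M, SZ_4K, &blocks, 0);
 *      // ... use the blocks ...
 *      drm_buddy_free_list(&mm, &blocks);
 *      drm_buddy_fini(&mm);    // WARNs unless everything was freed
 */
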
static int split_block(struct drm_buddy *mm,
                       struct drm_buddy_block *block)
{
        unsigned int block_order = drm_buddy_block_order(block) - 1;
        u64 offset = drm_buddy_block_offset(block);

        BUG_ON(!drm_buddy_block_is_free(block));
        BUG_ON(!drm_buddy_block_order(block));

        block->left = drm_block_alloc(mm, block, block_order, offset);
        if (!block->left)
                return -ENOMEM;

        block->right = drm_block_alloc(mm, block, block_order,
                                       offset + (mm->chunk_size << block_order));
        if (!block->right) {
                drm_block_free(mm, block->left);
                return -ENOMEM;
        }

        mark_free(mm, block->left);
        mark_free(mm, block->right);

        mark_split(block);

        return 0;
}

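/*
 * Worked example of the offset arithmetic above (illustrative): with a
 * 4 KiB chunk size, splitting a free order-4 block at offset 0x20000
 * produces two order-3 children, the left at 0x20000 and the right at
 * 0x20000 + (SZ_4K << 3) = 0x28000.
 */
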
static struct drm_buddy_block *
__get_buddy(struct drm_buddy_block *block)
{
        struct drm_buddy_block *parent;

        parent = block->parent;
        if (!parent)
                return NULL;

        if (parent->left == block)
                return parent->right;

        return parent->left;
}

/**
 * drm_get_buddy - get buddy address
 *
 * @block: DRM buddy block
 *
 * Returns the corresponding buddy block for @block, or NULL
 * if this is a root block and can't be merged further.
 * Requires some kind of locking to protect against
 * any concurrent allocate and free operations.
 */
struct drm_buddy_block *
drm_get_buddy(struct drm_buddy_block *block)
{
        return __get_buddy(block);
}
EXPORT_SYMBOL(drm_get_buddy);

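/*
 * Usage sketch (illustrative): the buddy is the sibling under the same
 * parent, which is what decides whether a pair can merge back:
 *
 *      struct drm_buddy_block *buddy = drm_get_buddy(block);
 *
 *      if (buddy && drm_buddy_block_is_free(buddy))
 *              ; // freeing @block would coalesce the pair into the parent
 */
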
static void __drm_buddy_free(struct drm_buddy *mm,
                             struct drm_buddy_block *block)
{
        struct drm_buddy_block *parent;

        while ((parent = block->parent)) {
                struct drm_buddy_block *buddy;

                buddy = __get_buddy(block);

                if (!drm_buddy_block_is_free(buddy))
                        break;

                list_del(&buddy->link);

                drm_block_free(mm, block);
                drm_block_free(mm, buddy);

                block = parent;
        }

        mark_free(mm, block);
}

/**
 * drm_buddy_free_block - free a block
 *
 * @mm: DRM buddy manager
 * @block: block to be freed
 */
void drm_buddy_free_block(struct drm_buddy *mm,
                          struct drm_buddy_block *block)
{
        BUG_ON(!drm_buddy_block_is_allocated(block));
        mm->avail += drm_buddy_block_size(mm, block);
        __drm_buddy_free(mm, block);
}
EXPORT_SYMBOL(drm_buddy_free_block);

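/*
 * Note that freeing is hierarchical: __drm_buddy_free() walks up the tree
 * and merges the freed block with its buddy whenever both halves of a pair
 * are free, so a fully freed manager collapses back to its root blocks.
 */
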
/**
 * drm_buddy_free_list - free blocks
 *
 * @mm: DRM buddy manager
 * @objects: input list head of blocks to free
 */
void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
{
        struct drm_buddy_block *block, *on;

        list_for_each_entry_safe(block, on, objects, link) {
                drm_buddy_free_block(mm, block);
                cond_resched();
        }
        INIT_LIST_HEAD(objects);
}
EXPORT_SYMBOL(drm_buddy_free_list);

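/*
 * Usage sketch (illustrative): blocks handed out by
 * drm_buddy_alloc_blocks() are typically returned in a single call, which
 * also reinitializes the list head:
 *
 *      LIST_HEAD(blocks);
 *
 *      if (!drm_buddy_alloc_blocks(mm, start, end, size, min_block_size,
 *                                  &blocks, flags))
 *              drm_buddy_free_list(mm, &blocks);
 */
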
static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
        return s1 <= e2 && e1 >= s2;
}

static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
        return s1 <= s2 && e1 >= e2;
}

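/*
 * Both helpers take inclusive [start, end] coordinates, matching the
 * "end = start + size - 1" convention used below. For example,
 * overlaps(0x0000, 0x0fff, 0x1000, 0x1fff) is false, while ranges sharing
 * even a single byte, such as [0x0000, 0x1000] and [0x1000, 0x1fff],
 * do overlap.
 */
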
static struct drm_buddy_block *
alloc_range_bias(struct drm_buddy *mm,
                 u64 start, u64 end,
                 unsigned int order)
{
        u64 req_size = mm->chunk_size << order;
        struct drm_buddy_block *block;
        struct drm_buddy_block *buddy;
        LIST_HEAD(dfs);
        int err;
        int i;

        end = end - 1;

        for (i = 0; i < mm->n_roots; ++i)
                list_add_tail(&mm->roots[i]->tmp_link, &dfs);

        do {
                u64 block_start;
                u64 block_end;

                block = list_first_entry_or_null(&dfs,
                                                 struct drm_buddy_block,
                                                 tmp_link);
                if (!block)
                        break;

                list_del(&block->tmp_link);

                if (drm_buddy_block_order(block) < order)
                        continue;

                block_start = drm_buddy_block_offset(block);
                block_end = block_start + drm_buddy_block_size(mm, block) - 1;

                if (!overlaps(start, end, block_start, block_end))
                        continue;

                if (drm_buddy_block_is_allocated(block))
                        continue;

                if (block_start < start || block_end > end) {
                        u64 adjusted_start = max(block_start, start);
                        u64 adjusted_end = min(block_end, end);

                        if (round_down(adjusted_end + 1, req_size) <=
                            round_up(adjusted_start, req_size))
                                continue;
                }

                if (contains(start, end, block_start, block_end) &&
                    order == drm_buddy_block_order(block)) {
                        /*
                         * Find the free block within the range.
                         */
                        if (drm_buddy_block_is_free(block))
                                return block;

                        continue;
                }

                if (!drm_buddy_block_is_split(block)) {
                        err = split_block(mm, block);
                        if (unlikely(err))
                                goto err_undo;
                }

                list_add(&block->right->tmp_link, &dfs);
                list_add(&block->left->tmp_link, &dfs);
        } while (1);

        return ERR_PTR(-ENOSPC);

err_undo:
        /*
         * We really don't want to leave around a bunch of split blocks, since
         * bigger is better, so make sure we merge everything back before we
         * free the allocated blocks.
         */
        buddy = __get_buddy(block);
        if (buddy &&
            (drm_buddy_block_is_free(block) &&
             drm_buddy_block_is_free(buddy)))
                __drm_buddy_free(mm, block);
        return ERR_PTR(err);
}

static struct drm_buddy_block *
get_maxblock(struct drm_buddy *mm, unsigned int order)
{
        struct drm_buddy_block *max_block = NULL, *node;
        unsigned int i;

        for (i = order; i <= mm->max_order; ++i) {
                if (!list_empty(&mm->free_list[i])) {
                        node = list_last_entry(&mm->free_list[i],
                                               struct drm_buddy_block,
                                               link);
                        if (!max_block) {
                                max_block = node;
                                continue;
                        }

                        if (drm_buddy_block_offset(node) >
                            drm_buddy_block_offset(max_block)) {
                                max_block = node;
                        }
                }
        }

        return max_block;
}

static struct drm_buddy_block *
alloc_from_freelist(struct drm_buddy *mm,
                    unsigned int order,
                    unsigned long flags)
{
        struct drm_buddy_block *block = NULL;
        unsigned int tmp;
        int err;

        if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
                block = get_maxblock(mm, order);
                if (block)
                        /* Store the obtained block order */
                        tmp = drm_buddy_block_order(block);
        } else {
                for (tmp = order; tmp <= mm->max_order; ++tmp) {
                        if (!list_empty(&mm->free_list[tmp])) {
                                block = list_last_entry(&mm->free_list[tmp],
                                                        struct drm_buddy_block,
                                                        link);
                                if (block)
                                        break;
                        }
                }
        }

        if (!block)
                return ERR_PTR(-ENOSPC);

        BUG_ON(!drm_buddy_block_is_free(block));

        while (tmp != order) {
                err = split_block(mm, block);
                if (unlikely(err))
                        goto err_undo;

                block = block->right;
                tmp--;
        }
        return block;

err_undo:
        if (tmp != order)
                __drm_buddy_free(mm, block);
        return ERR_PTR(err);
}

static int __alloc_range(struct drm_buddy *mm,
                         struct list_head *dfs,
                         u64 start, u64 size,
                         struct list_head *blocks,
                         u64 *total_allocated_on_err)
{
        struct drm_buddy_block *block;
        struct drm_buddy_block *buddy;
        u64 total_allocated = 0;
        LIST_HEAD(allocated);
        u64 end;
        int err;

        end = start + size - 1;

        do {
                u64 block_start;
                u64 block_end;

                block = list_first_entry_or_null(dfs,
                                                 struct drm_buddy_block,
                                                 tmp_link);
                if (!block)
                        break;

                list_del(&block->tmp_link);

                block_start = drm_buddy_block_offset(block);
                block_end = block_start + drm_buddy_block_size(mm, block) - 1;

                if (!overlaps(start, end, block_start, block_end))
                        continue;

                if (drm_buddy_block_is_allocated(block)) {
                        err = -ENOSPC;
                        goto err_free;
                }

                if (contains(start, end, block_start, block_end)) {
                        if (!drm_buddy_block_is_free(block)) {
                                err = -ENOSPC;
                                goto err_free;
                        }

                        mark_allocated(block);
                        total_allocated += drm_buddy_block_size(mm, block);
                        mm->avail -= drm_buddy_block_size(mm, block);
                        list_add_tail(&block->link, &allocated);
                        continue;
                }

                if (!drm_buddy_block_is_split(block)) {
                        err = split_block(mm, block);
                        if (unlikely(err))
                                goto err_undo;
                }

                list_add(&block->right->tmp_link, dfs);
                list_add(&block->left->tmp_link, dfs);
        } while (1);

        if (total_allocated < size) {
                err = -ENOSPC;
                goto err_free;
        }

        list_splice_tail(&allocated, blocks);

        return 0;

err_undo:
        /*
         * We really don't want to leave around a bunch of split blocks, since
         * bigger is better, so make sure we merge everything back before we
         * free the allocated blocks.
         */
        buddy = __get_buddy(block);
        if (buddy &&
            (drm_buddy_block_is_free(block) &&
             drm_buddy_block_is_free(buddy)))
                __drm_buddy_free(mm, block);

err_free:
        if (err == -ENOSPC && total_allocated_on_err) {
                list_splice_tail(&allocated, blocks);
                *total_allocated_on_err = total_allocated;
        } else {
                drm_buddy_free_list(mm, &allocated);
        }

        return err;
}

static int __drm_buddy_alloc_range(struct drm_buddy *mm,
                                   u64 start,
                                   u64 size,
                                   u64 *total_allocated_on_err,
                                   struct list_head *blocks)
{
        LIST_HEAD(dfs);
        int i;

        for (i = 0; i < mm->n_roots; ++i)
                list_add_tail(&mm->roots[i]->tmp_link, &dfs);

        return __alloc_range(mm, &dfs, start, size,
                             blocks, total_allocated_on_err);
}

static int __alloc_contig_try_harder(struct drm_buddy *mm,
                                     u64 size,
                                     u64 min_block_size,
                                     struct list_head *blocks)
{
        u64 rhs_offset, lhs_offset, lhs_size, filled;
        struct drm_buddy_block *block;
        struct list_head *list;
        LIST_HEAD(blocks_lhs);
        unsigned long pages;
        unsigned int order;
        u64 modify_size;
        int err;

        modify_size = rounddown_pow_of_two(size);
        pages = modify_size >> ilog2(mm->chunk_size);
        order = fls(pages) - 1;
        if (order == 0)
                return -ENOSPC;

        list = &mm->free_list[order];
        if (list_empty(list))
                return -ENOSPC;

        list_for_each_entry_reverse(block, list, link) {
                /* Allocate blocks traversing RHS */
                rhs_offset = drm_buddy_block_offset(block);
                err = __drm_buddy_alloc_range(mm, rhs_offset, size,
                                              &filled, blocks);
                if (!err || err != -ENOSPC)
                        return err;

                lhs_size = max((size - filled), min_block_size);
                if (!IS_ALIGNED(lhs_size, min_block_size))
                        lhs_size = round_up(lhs_size, min_block_size);

                /* Allocate blocks traversing LHS */
                lhs_offset = drm_buddy_block_offset(block) - lhs_size;
                err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
                                              NULL, &blocks_lhs);
                if (!err) {
                        list_splice(&blocks_lhs, blocks);
                        return 0;
                } else if (err != -ENOSPC) {
                        drm_buddy_free_list(mm, blocks);
                        return err;
                }
                /* Free blocks for the next iteration */
                drm_buddy_free_list(mm, blocks);
        }

        return -ENOSPC;
}

/**
 * drm_buddy_block_trim - free unused pages
 *
 * @mm: DRM buddy manager
 * @new_size: original size requested
 * @blocks: Input and output list of allocated blocks.
 * MUST contain a single block as input to be trimmed.
 * On success will contain the newly allocated blocks
 * making up the @new_size. Blocks always appear in
 * ascending order.
 *
 * For contiguous allocation, we round up the size to the nearest
 * power-of-two value; drivers consume the *actual* size, so the remaining
 * portion is unused and can optionally be freed with this function.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_block_trim(struct drm_buddy *mm,
                         u64 new_size,
                         struct list_head *blocks)
{
        struct drm_buddy_block *parent;
        struct drm_buddy_block *block;
        LIST_HEAD(dfs);
        u64 new_start;
        int err;

        if (!list_is_singular(blocks))
                return -EINVAL;

        block = list_first_entry(blocks,
                                 struct drm_buddy_block,
                                 link);

        if (WARN_ON(!drm_buddy_block_is_allocated(block)))
                return -EINVAL;

        if (new_size > drm_buddy_block_size(mm, block))
                return -EINVAL;

        if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
                return -EINVAL;

        if (new_size == drm_buddy_block_size(mm, block))
                return 0;

        list_del(&block->link);
        mark_free(mm, block);
        mm->avail += drm_buddy_block_size(mm, block);

        /* Prevent recursively freeing this node */
        parent = block->parent;
        block->parent = NULL;

        new_start = drm_buddy_block_offset(block);
        list_add(&block->tmp_link, &dfs);
        err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
        if (err) {
                mark_allocated(block);
                mm->avail -= drm_buddy_block_size(mm, block);
                list_add(&block->link, blocks);
        }

        block->parent = parent;
        return err;
}
EXPORT_SYMBOL(drm_buddy_block_trim);

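/*
 * Usage sketch (illustrative): a driver that wants 512 MiB contiguous but
 * only needs 300 MiB of it can allocate the power-of-two block and trim
 * the tail afterwards:
 *
 *      LIST_HEAD(blocks);
 *      int err;
 *
 *      err = drm_buddy_alloc_blocks(mm, 0, mm->size, SZ_512M, SZ_512M,
 *                                   &blocks, DRM_BUDDY_CONTIGUOUS_ALLOCATION);
 *      if (!err)
 *              err = drm_buddy_block_trim(mm, 300 * SZ_1M, &blocks);
 *      // @blocks now covers exactly 300 MiB, lowest offset first
 */
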
/**
 * drm_buddy_alloc_blocks - allocate power-of-two blocks
 *
 * @mm: DRM buddy manager to allocate from
 * @start: start of the allowed range for this block
 * @end: end of the allowed range for this block
 * @size: size of the allocation
 * @min_block_size: alignment of the allocation
 * @blocks: output list head to add allocated blocks
 * @flags: DRM_BUDDY_*_ALLOCATION flags
 *
 * alloc_range_bias() is called when range limitations are given; it
 * traverses the tree and returns the desired block.
 *
 * alloc_from_freelist() is called when *no* range restrictions are
 * enforced; it picks the block from the freelist.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
                           u64 start, u64 end, u64 size,
                           u64 min_block_size,
                           struct list_head *blocks,
                           unsigned long flags)
{
        struct drm_buddy_block *block = NULL;
        u64 original_size, original_min_size;
        unsigned int min_order, order;
        LIST_HEAD(allocated);
        unsigned long pages;
        int err;

        if (size < mm->chunk_size)
                return -EINVAL;

        if (min_block_size < mm->chunk_size)
                return -EINVAL;

        if (!is_power_of_2(min_block_size))
                return -EINVAL;

        if (!IS_ALIGNED(start | end | size, mm->chunk_size))
                return -EINVAL;

        if (end > mm->size)
                return -EINVAL;

        if (range_overflows(start, size, mm->size))
                return -EINVAL;

        /* Actual range allocation */
        if (start + size == end) {
                if (!IS_ALIGNED(start | end, min_block_size))
                        return -EINVAL;

                return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);
        }

        original_size = size;
        original_min_size = min_block_size;

        /* Round up the size to a power of 2 */
        if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
                size = roundup_pow_of_two(size);
                min_block_size = size;
        /* Align size value to min_block_size */
        } else if (!IS_ALIGNED(size, min_block_size)) {
                size = round_up(size, min_block_size);
        }

        pages = size >> ilog2(mm->chunk_size);
        order = fls(pages) - 1;
        min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);

        do {
                order = min(order, (unsigned int)fls(pages) - 1);
                BUG_ON(order > mm->max_order);
                BUG_ON(order < min_order);

                do {
                        if (flags & DRM_BUDDY_RANGE_ALLOCATION)
                                /* Allocate traversing within the range */
                                block = alloc_range_bias(mm, start, end, order);
                        else
                                /* Allocate from freelist */
                                block = alloc_from_freelist(mm, order, flags);

                        if (!IS_ERR(block))
                                break;

                        if (order-- == min_order) {
                                if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
                                    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
                                        /*
                                         * Try contiguous block allocation through
                                         * the try harder method.
                                         */
                                        return __alloc_contig_try_harder(mm,
                                                                         original_size,
                                                                         original_min_size,
                                                                         blocks);
                                err = -ENOSPC;
                                goto err_free;
                        }
                } while (1);

                mark_allocated(block);
                mm->avail -= drm_buddy_block_size(mm, block);
                kmemleak_update_trace(block);
                list_add_tail(&block->link, &allocated);

                pages -= BIT(order);

                if (!pages)
                        break;
        } while (1);

        /* Trim the allocated block to the required size */
        if (original_size != size) {
                struct list_head *trim_list;
                LIST_HEAD(temp);
                u64 trim_size;

                trim_list = &allocated;
                trim_size = original_size;

                if (!list_is_singular(&allocated)) {
                        block = list_last_entry(&allocated, typeof(*block), link);
                        list_move(&block->link, &temp);
                        trim_list = &temp;
                        trim_size = drm_buddy_block_size(mm, block) -
                                (size - original_size);
                }

                drm_buddy_block_trim(mm,
                                     trim_size,
                                     trim_list);

                if (!list_empty(&temp))
                        list_splice_tail(trim_list, &allocated);
        }

        list_splice_tail(&allocated, blocks);
        return 0;

err_free:
        drm_buddy_free_list(mm, &allocated);
        return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);

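/*
 * Usage sketch (illustrative): allocate 16 MiB in 64 KiB-aligned pieces
 * restricted to the first 256 MiB of the managed space:
 *
 *      LIST_HEAD(blocks);
 *      int err;
 *
 *      err = drm_buddy_alloc_blocks(mm, 0, SZ_256M, SZ_16M, SZ_64K,
 *                                   &blocks, DRM_BUDDY_RANGE_ALLOCATION);
 *      if (err)
 *              return err;
 *      // on success, @blocks holds power-of-two blocks summing to 16 MiB
 */
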
/**
 * drm_buddy_block_print - print block information
 *
 * @mm: DRM buddy manager
 * @block: DRM buddy block
 * @p: DRM printer to use
 */
void drm_buddy_block_print(struct drm_buddy *mm,
                           struct drm_buddy_block *block,
                           struct drm_printer *p)
{
        u64 start = drm_buddy_block_offset(block);
        u64 size = drm_buddy_block_size(mm, block);

        drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
}
EXPORT_SYMBOL(drm_buddy_block_print);

/**
 * drm_buddy_print - print allocator state
 *
 * @mm: DRM buddy manager
 * @p: DRM printer to use
 */
void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
{
        int order;

        drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n",
                   mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20);

        for (order = mm->max_order; order >= 0; order--) {
                struct drm_buddy_block *block;
                u64 count = 0, free;

                list_for_each_entry(block, &mm->free_list[order], link) {
                        BUG_ON(!drm_buddy_block_is_free(block));
                        count++;
                }

                drm_printf(p, "order-%2d ", order);

                free = count * (mm->chunk_size << order);
                if (free < SZ_1M)
                        drm_printf(p, "free: %8llu KiB", free >> 10);
                else
                        drm_printf(p, "free: %8llu MiB", free >> 20);

                drm_printf(p, ", blocks: %llu\n", count);
        }
}
EXPORT_SYMBOL(drm_buddy_print);

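/*
 * Usage sketch (illustrative, assuming a printer such as the one returned
 * by drm_info_printer() from drm_print.h):
 *
 *      struct drm_printer p = drm_info_printer(dev); // dev: struct device *
 *
 *      drm_buddy_print(mm, &p);
 *      // prints e.g. "chunk_size: 4KiB, total: 1024MiB, free: 1008MiB"
 *      // followed by one "order-NN" line per order
 */
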
static void drm_buddy_module_exit(void)
{
        kmem_cache_destroy(slab_blocks);
}

static int __init drm_buddy_module_init(void)
{
        slab_blocks = KMEM_CACHE(drm_buddy_block, 0);
        if (!slab_blocks)
                return -ENOMEM;

        return 0;
}

module_init(drm_buddy_module_init);
module_exit(drm_buddy_module_exit);

MODULE_DESCRIPTION("DRM Buddy Allocator");
MODULE_LICENSE("Dual MIT/GPL");