// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>

static struct kmem_cache *slab_blocks;

static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
                                               struct drm_buddy_block *parent,
                                               unsigned int order,
                                               u64 offset)
{
        struct drm_buddy_block *block;

        BUG_ON(order > DRM_BUDDY_MAX_ORDER);

        block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
        if (!block)
                return NULL;

        block->header = offset;
        block->header |= order;
        block->parent = parent;

        BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
        return block;
}

static void drm_block_free(struct drm_buddy *mm,
                           struct drm_buddy_block *block)
{
        kmem_cache_free(slab_blocks, block);
}

static void list_insert_sorted(struct drm_buddy *mm,
                               struct drm_buddy_block *block)
{
        struct drm_buddy_block *node;
        struct list_head *head;

        head = &mm->free_list[drm_buddy_block_order(block)];
        if (list_empty(head)) {
                list_add(&block->link, head);
                return;
        }

        list_for_each_entry(node, head, link)
                if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
                        break;

        __list_add(&block->link, node->link.prev, &node->link);
}

static void clear_reset(struct drm_buddy_block *block)
{
        block->header &= ~DRM_BUDDY_HEADER_CLEAR;
}

static void mark_cleared(struct drm_buddy_block *block)
{
        block->header |= DRM_BUDDY_HEADER_CLEAR;
}

static void mark_allocated(struct drm_buddy_block *block)
{
        block->header &= ~DRM_BUDDY_HEADER_STATE;
        block->header |= DRM_BUDDY_ALLOCATED;

        list_del(&block->link);
}

static void mark_free(struct drm_buddy *mm,
                      struct drm_buddy_block *block)
{
        block->header &= ~DRM_BUDDY_HEADER_STATE;
        block->header |= DRM_BUDDY_FREE;

        list_insert_sorted(mm, block);
}

static void mark_split(struct drm_buddy_block *block)
{
        block->header &= ~DRM_BUDDY_HEADER_STATE;
        block->header |= DRM_BUDDY_SPLIT;

        list_del(&block->link);
}

static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
        return s1 <= e2 && e1 >= s2;
}

static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
        return s1 <= s2 && e1 >= e2;
}

static struct drm_buddy_block *
__get_buddy(struct drm_buddy_block *block)
{
        struct drm_buddy_block *parent;

        parent = block->parent;
        if (!parent)
                return NULL;

        if (parent->left == block)
                return parent->right;

        return parent->left;
}

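/*
 * Free a block and merge it back up the tree: while the buddy of each
 * freed level is also free (and, unless @force_merge is set, has the
 * same clear state), both halves are released and the parent takes
 * their place. Returns the order of the final free block.
 */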
static unsigned int __drm_buddy_free(struct drm_buddy *mm,
                                     struct drm_buddy_block *block,
                                     bool force_merge)
{
        struct drm_buddy_block *parent;
        unsigned int order;

        while ((parent = block->parent)) {
                struct drm_buddy_block *buddy;

                buddy = __get_buddy(block);

                if (!drm_buddy_block_is_free(buddy))
                        break;

                if (!force_merge) {
                        /*
                         * Check the clear state of the block and its buddy,
                         * and exit the loop if they have dissimilar states.
                         */
                        if (drm_buddy_block_is_clear(block) !=
                            drm_buddy_block_is_clear(buddy))
                                break;

                        if (drm_buddy_block_is_clear(block))
                                mark_cleared(parent);
                }

                list_del(&buddy->link);
                if (force_merge && drm_buddy_block_is_clear(buddy))
                        mm->clear_avail -= drm_buddy_block_size(mm, buddy);

                drm_block_free(mm, block);
                drm_block_free(mm, buddy);

                block = parent;
        }

        order = drm_buddy_block_order(block);
        mark_free(mm, block);

        return order;
}

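/*
 * Forcibly merge free buddy pairs with dissimilar clear states inside
 * [start, end] until a free block of at least @min_order is produced.
 * Used when normal merging was deferred because only one half of a
 * buddy pair was cleared. Returns 0 once such a block exists.
 */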
static int __force_merge(struct drm_buddy *mm,
                         u64 start,
                         u64 end,
                         unsigned int min_order)
{
        unsigned int order;
        int i;

        if (!min_order)
                return -ENOMEM;

        if (min_order > mm->max_order)
                return -EINVAL;

        for (i = min_order - 1; i >= 0; i--) {
                struct drm_buddy_block *block, *prev;

                list_for_each_entry_safe_reverse(block, prev, &mm->free_list[i], link) {
                        struct drm_buddy_block *buddy;
                        u64 block_start, block_end;

                        if (!block->parent)
                                continue;

                        block_start = drm_buddy_block_offset(block);
                        block_end = block_start + drm_buddy_block_size(mm, block) - 1;

                        if (!contains(start, end, block_start, block_end))
                                continue;

                        buddy = __get_buddy(block);
                        if (!drm_buddy_block_is_free(buddy))
                                continue;

                        WARN_ON(drm_buddy_block_is_clear(block) ==
                                drm_buddy_block_is_clear(buddy));
                        /*
                         * If the prev block is the same as the buddy, don't
                         * access it in the next iteration, since we would
                         * free the buddy block as part of the free function.
                         */
                        if (prev == buddy)
                                prev = list_prev_entry(prev, link);

                        list_del(&block->link);
                        if (drm_buddy_block_is_clear(block))
                                mm->clear_avail -= drm_buddy_block_size(mm, block);

                        order = __drm_buddy_free(mm, block, true);
                        if (order >= min_order)
                                return 0;
                }
        }

        return -ENOMEM;
}

/**
 * drm_buddy_init - init memory manager
 *
 * @mm: DRM buddy manager to initialize
 * @size: size in bytes to manage
 * @chunk_size: minimum page size in bytes for our allocations
 *
 * Initializes the memory manager and its resources.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
        unsigned int i;
        u64 offset;

        if (size < chunk_size)
                return -EINVAL;

        if (chunk_size < PAGE_SIZE)
                return -EINVAL;

        if (!is_power_of_2(chunk_size))
                return -EINVAL;

        size = round_down(size, chunk_size);

        mm->size = size;
        mm->avail = size;
        mm->chunk_size = chunk_size;
        mm->max_order = ilog2(size) - ilog2(chunk_size);

        BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);

        mm->free_list = kmalloc_array(mm->max_order + 1,
                                      sizeof(struct list_head),
                                      GFP_KERNEL);
        if (!mm->free_list)
                return -ENOMEM;

        for (i = 0; i <= mm->max_order; ++i)
                INIT_LIST_HEAD(&mm->free_list[i]);

        mm->n_roots = hweight64(size);

        mm->roots = kmalloc_array(mm->n_roots,
                                  sizeof(struct drm_buddy_block *),
                                  GFP_KERNEL);
        if (!mm->roots)
                goto out_free_list;

        offset = 0;
        i = 0;

        /*
         * Split into power-of-two blocks, in case we are given a size that is
         * not itself a power-of-two.
         */
        do {
                struct drm_buddy_block *root;
                unsigned int order;
                u64 root_size;

                order = ilog2(size) - ilog2(chunk_size);
                root_size = chunk_size << order;

                root = drm_block_alloc(mm, NULL, order, offset);
                if (!root)
                        goto out_free_roots;

                mark_free(mm, root);

                BUG_ON(i > mm->max_order);
                BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);

                mm->roots[i] = root;

                offset += root_size;
                size -= root_size;
                i++;
        } while (size);

        return 0;

out_free_roots:
        while (i--)
                drm_block_free(mm, mm->roots[i]);
        kfree(mm->roots);
out_free_list:
        kfree(mm->free_list);
        return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);

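/*
 * Illustrative sketch (not part of this file): a driver managing a
 * device-local memory region would typically embed a struct drm_buddy
 * and initialize it once at probe time. The function name and
 * vram_size are hypothetical; SZ_4K comes from <linux/sizes.h>.
 */
#if 0
static int example_vram_mgr_init(struct drm_buddy *mm, u64 vram_size)
{
        /* Manage vram_size bytes in minimum chunks of 4 KiB */
        return drm_buddy_init(mm, vram_size, SZ_4K);
}
#endif
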
/**
 * drm_buddy_fini - tear down the memory manager
 *
 * @mm: DRM buddy manager to free
 *
 * Cleans up the memory manager resources and the freelist.
 */
void drm_buddy_fini(struct drm_buddy *mm)
{
        u64 root_size, size;
        unsigned int order;
        int i;

        size = mm->size;

        for (i = 0; i < mm->n_roots; ++i) {
                order = ilog2(size) - ilog2(mm->chunk_size);
                __force_merge(mm, 0, size, order);

                WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
                drm_block_free(mm, mm->roots[i]);

                root_size = mm->chunk_size << order;
                size -= root_size;
        }

        WARN_ON(mm->avail != mm->size);

        kfree(mm->roots);
        kfree(mm->free_list);
}
EXPORT_SYMBOL(drm_buddy_fini);

static int split_block(struct drm_buddy *mm,
                       struct drm_buddy_block *block)
{
        unsigned int block_order = drm_buddy_block_order(block) - 1;
        u64 offset = drm_buddy_block_offset(block);

        BUG_ON(!drm_buddy_block_is_free(block));
        BUG_ON(!drm_buddy_block_order(block));

        block->left = drm_block_alloc(mm, block, block_order, offset);
        if (!block->left)
                return -ENOMEM;

        block->right = drm_block_alloc(mm, block, block_order,
                                       offset + (mm->chunk_size << block_order));
        if (!block->right) {
                drm_block_free(mm, block->left);
                return -ENOMEM;
        }

        mark_free(mm, block->left);
        mark_free(mm, block->right);

        if (drm_buddy_block_is_clear(block)) {
                mark_cleared(block->left);
                mark_cleared(block->right);
                clear_reset(block);
        }

        mark_split(block);

        return 0;
}

/**
 * drm_get_buddy - get buddy address
 *
 * @block: DRM buddy block
 *
 * Returns the corresponding buddy block for @block, or NULL
 * if this is a root block and can't be merged further.
 * The caller is responsible for locking to protect against
 * any concurrent allocate and free operations.
 */
struct drm_buddy_block *
drm_get_buddy(struct drm_buddy_block *block)
{
        return __get_buddy(block);
}
EXPORT_SYMBOL(drm_get_buddy);

/**
 * drm_buddy_free_block - free a block
 *
 * @mm: DRM buddy manager
 * @block: block to be freed
 */
void drm_buddy_free_block(struct drm_buddy *mm,
                          struct drm_buddy_block *block)
{
        BUG_ON(!drm_buddy_block_is_allocated(block));
        mm->avail += drm_buddy_block_size(mm, block);
        if (drm_buddy_block_is_clear(block))
                mm->clear_avail += drm_buddy_block_size(mm, block);

        __drm_buddy_free(mm, block, false);
}
EXPORT_SYMBOL(drm_buddy_free_block);

static void __drm_buddy_free_list(struct drm_buddy *mm,
                                  struct list_head *objects,
                                  bool mark_clear,
                                  bool mark_dirty)
{
        struct drm_buddy_block *block, *on;

        WARN_ON(mark_dirty && mark_clear);

        list_for_each_entry_safe(block, on, objects, link) {
                if (mark_clear)
                        mark_cleared(block);
                else if (mark_dirty)
                        clear_reset(block);
                drm_buddy_free_block(mm, block);
                cond_resched();
        }
        INIT_LIST_HEAD(objects);
}

static void drm_buddy_free_list_internal(struct drm_buddy *mm,
                                         struct list_head *objects)
{
        /*
         * Don't touch the clear/dirty bit, since allocation is still internal
         * at this point. For example we might have just failed part of the
         * allocation.
         */
        __drm_buddy_free_list(mm, objects, false, false);
}

/**
 * drm_buddy_free_list - free blocks
 *
 * @mm: DRM buddy manager
 * @objects: input list head of blocks to free
 * @flags: optional flags like DRM_BUDDY_CLEARED
 */
void drm_buddy_free_list(struct drm_buddy *mm,
                         struct list_head *objects,
                         unsigned int flags)
{
        bool mark_clear = flags & DRM_BUDDY_CLEARED;

        __drm_buddy_free_list(mm, objects, mark_clear, !mark_clear);
}
EXPORT_SYMBOL(drm_buddy_free_list);

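/*
 * Illustrative sketch (hypothetical caller): a driver that has zeroed
 * the backing memory of its blocks can return them with
 * DRM_BUDDY_CLEARED, so future DRM_BUDDY_CLEAR_ALLOCATION requests can
 * reuse them without another clear.
 */
#if 0
static void example_release_cleared(struct drm_buddy *mm,
                                    struct list_head *blocks)
{
        /* The blocks were scrubbed by the driver, so mark them cleared */
        drm_buddy_free_list(mm, blocks, DRM_BUDDY_CLEARED);
}
#endif
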
static bool block_incompatible(struct drm_buddy_block *block, unsigned int flags)
{
        bool needs_clear = flags & DRM_BUDDY_CLEAR_ALLOCATION;

        return needs_clear != drm_buddy_block_is_clear(block);
}

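/*
 * Depth-first search from the root blocks for a free block of exactly
 * @order that fits inside [start, end]. With @fallback unset, blocks
 * whose clear state does not match @flags are skipped; the fallback
 * pass accepts any free block in range.
 */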
static struct drm_buddy_block *
__alloc_range_bias(struct drm_buddy *mm,
                   u64 start, u64 end,
                   unsigned int order,
                   unsigned long flags,
                   bool fallback)
{
        u64 req_size = mm->chunk_size << order;
        struct drm_buddy_block *block;
        struct drm_buddy_block *buddy;
        LIST_HEAD(dfs);
        int err;
        int i;

        end = end - 1;

        for (i = 0; i < mm->n_roots; ++i)
                list_add_tail(&mm->roots[i]->tmp_link, &dfs);

        do {
                u64 block_start;
                u64 block_end;

                block = list_first_entry_or_null(&dfs,
                                                 struct drm_buddy_block,
                                                 tmp_link);
                if (!block)
                        break;

                list_del(&block->tmp_link);

                if (drm_buddy_block_order(block) < order)
                        continue;

                block_start = drm_buddy_block_offset(block);
                block_end = block_start + drm_buddy_block_size(mm, block) - 1;

                if (!overlaps(start, end, block_start, block_end))
                        continue;

                if (drm_buddy_block_is_allocated(block))
                        continue;

                if (block_start < start || block_end > end) {
                        u64 adjusted_start = max(block_start, start);
                        u64 adjusted_end = min(block_end, end);

                        if (round_down(adjusted_end + 1, req_size) <=
                            round_up(adjusted_start, req_size))
                                continue;
                }

                if (contains(start, end, block_start, block_end) &&
                    order == drm_buddy_block_order(block)) {
                        if (!fallback && block_incompatible(block, flags))
                                continue;

                        /*
                         * Find the free block within the range.
                         */
                        if (drm_buddy_block_is_free(block))
                                return block;

                        continue;
                }

                if (!drm_buddy_block_is_split(block)) {
                        err = split_block(mm, block);
                        if (unlikely(err))
                                goto err_undo;
                }

                list_add(&block->right->tmp_link, &dfs);
                list_add(&block->left->tmp_link, &dfs);
        } while (1);

        return ERR_PTR(-ENOSPC);

err_undo:
        /*
         * We really don't want to leave around a bunch of split blocks, since
         * bigger is better, so make sure we merge everything back before we
         * free the allocated blocks.
         */
        buddy = __get_buddy(block);
        if (buddy &&
            (drm_buddy_block_is_free(block) &&
             drm_buddy_block_is_free(buddy)))
                __drm_buddy_free(mm, block, false);
        return ERR_PTR(err);
}

static struct drm_buddy_block *
__drm_buddy_alloc_range_bias(struct drm_buddy *mm,
                             u64 start, u64 end,
                             unsigned int order,
                             unsigned long flags)
{
        struct drm_buddy_block *block;
        bool fallback = false;

        block = __alloc_range_bias(mm, start, end, order,
                                   flags, fallback);
        if (IS_ERR(block) && mm->clear_avail)
                return __alloc_range_bias(mm, start, end, order,
                                          flags, !fallback);

        return block;
}

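/*
 * For top-down allocation: scan the freelists from @order upwards and
 * return the compatible free block with the highest offset, or NULL if
 * none is found.
 */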
static struct drm_buddy_block *
get_maxblock(struct drm_buddy *mm, unsigned int order,
             unsigned long flags)
{
        struct drm_buddy_block *max_block = NULL, *block = NULL;
        unsigned int i;

        for (i = order; i <= mm->max_order; ++i) {
                struct drm_buddy_block *tmp_block;

                list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) {
                        if (block_incompatible(tmp_block, flags))
                                continue;

                        block = tmp_block;
                        break;
                }

                if (!block)
                        continue;

                if (!max_block) {
                        max_block = block;
                        continue;
                }

                if (drm_buddy_block_offset(block) >
                    drm_buddy_block_offset(max_block)) {
                        max_block = block;
                }
        }

        return max_block;
}

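/*
 * Pick a free block of at least @order straight from the freelists,
 * honouring DRM_BUDDY_TOPDOWN_ALLOCATION and the requested clear state,
 * falling back to any free block if no compatible one exists. The block
 * is then split down until it is exactly @order.
 */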
static struct drm_buddy_block *
alloc_from_freelist(struct drm_buddy *mm,
                    unsigned int order,
                    unsigned long flags)
{
        struct drm_buddy_block *block = NULL;
        unsigned int tmp;
        int err;

        if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
                block = get_maxblock(mm, order, flags);
                if (block)
                        /* Store the obtained block order */
                        tmp = drm_buddy_block_order(block);
        } else {
                for (tmp = order; tmp <= mm->max_order; ++tmp) {
                        struct drm_buddy_block *tmp_block;

                        list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) {
                                if (block_incompatible(tmp_block, flags))
                                        continue;

                                block = tmp_block;
                                break;
                        }

                        if (block)
                                break;
                }
        }

        if (!block) {
                /* Fallback method */
                for (tmp = order; tmp <= mm->max_order; ++tmp) {
                        if (!list_empty(&mm->free_list[tmp])) {
                                block = list_last_entry(&mm->free_list[tmp],
                                                        struct drm_buddy_block,
                                                        link);
                                if (block)
                                        break;
                        }
                }

                if (!block)
                        return ERR_PTR(-ENOSPC);
        }

        BUG_ON(!drm_buddy_block_is_free(block));

        while (tmp != order) {
                err = split_block(mm, block);
                if (unlikely(err))
                        goto err_undo;

                block = block->right;
                tmp--;
        }
        return block;

err_undo:
        if (tmp != order)
                __drm_buddy_free(mm, block, false);
        return ERR_PTR(err);
}

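/*
 * Walk the @dfs list of blocks and allocate everything that falls
 * inside [start, start + size), splitting partially overlapping blocks
 * as needed. On -ENOSPC the partial allocation is either reported via
 * @total_allocated_on_err or rolled back.
 */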
static int __alloc_range(struct drm_buddy *mm,
                         struct list_head *dfs,
                         u64 start, u64 size,
                         struct list_head *blocks,
                         u64 *total_allocated_on_err)
{
        struct drm_buddy_block *block;
        struct drm_buddy_block *buddy;
        u64 total_allocated = 0;
        LIST_HEAD(allocated);
        u64 end;
        int err;

        end = start + size - 1;

        do {
                u64 block_start;
                u64 block_end;

                block = list_first_entry_or_null(dfs,
                                                 struct drm_buddy_block,
                                                 tmp_link);
                if (!block)
                        break;

                list_del(&block->tmp_link);

                block_start = drm_buddy_block_offset(block);
                block_end = block_start + drm_buddy_block_size(mm, block) - 1;

                if (!overlaps(start, end, block_start, block_end))
                        continue;

                if (drm_buddy_block_is_allocated(block)) {
                        err = -ENOSPC;
                        goto err_free;
                }

                if (contains(start, end, block_start, block_end)) {
                        if (drm_buddy_block_is_free(block)) {
                                mark_allocated(block);
                                total_allocated += drm_buddy_block_size(mm, block);
                                mm->avail -= drm_buddy_block_size(mm, block);
                                if (drm_buddy_block_is_clear(block))
                                        mm->clear_avail -= drm_buddy_block_size(mm, block);
                                list_add_tail(&block->link, &allocated);
                                continue;
                        } else if (!mm->clear_avail) {
                                err = -ENOSPC;
                                goto err_free;
                        }
                }

                if (!drm_buddy_block_is_split(block)) {
                        err = split_block(mm, block);
                        if (unlikely(err))
                                goto err_undo;
                }

                list_add(&block->right->tmp_link, dfs);
                list_add(&block->left->tmp_link, dfs);
        } while (1);

        if (total_allocated < size) {
                err = -ENOSPC;
                goto err_free;
        }

        list_splice_tail(&allocated, blocks);

        return 0;

err_undo:
        /*
         * We really don't want to leave around a bunch of split blocks, since
         * bigger is better, so make sure we merge everything back before we
         * free the allocated blocks.
         */
        buddy = __get_buddy(block);
        if (buddy &&
            (drm_buddy_block_is_free(block) &&
             drm_buddy_block_is_free(buddy)))
                __drm_buddy_free(mm, block, false);

err_free:
        if (err == -ENOSPC && total_allocated_on_err) {
                list_splice_tail(&allocated, blocks);
                *total_allocated_on_err = total_allocated;
        } else {
                drm_buddy_free_list_internal(mm, &allocated);
        }

        return err;
}

static int __drm_buddy_alloc_range(struct drm_buddy *mm,
                                   u64 start,
                                   u64 size,
                                   u64 *total_allocated_on_err,
                                   struct list_head *blocks)
{
        LIST_HEAD(dfs);
        int i;

        for (i = 0; i < mm->n_roots; ++i)
                list_add_tail(&mm->roots[i]->tmp_link, &dfs);

        return __alloc_range(mm, &dfs, start, size,
                             blocks, total_allocated_on_err);
}

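/*
 * Last-resort contiguous allocation: for each suitably sized free
 * block, try to fill the request by allocating rightwards from the
 * block's offset and then extending leftwards, so the result is
 * contiguous even though no single free block was big enough.
 */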
static int __alloc_contig_try_harder(struct drm_buddy *mm,
                                     u64 size,
                                     u64 min_block_size,
                                     struct list_head *blocks)
{
        u64 rhs_offset, lhs_offset, lhs_size, filled;
        struct drm_buddy_block *block;
        struct list_head *list;
        LIST_HEAD(blocks_lhs);
        unsigned long pages;
        unsigned int order;
        u64 modify_size;
        int err;

        modify_size = rounddown_pow_of_two(size);
        pages = modify_size >> ilog2(mm->chunk_size);
        order = fls(pages) - 1;
        if (order == 0)
                return -ENOSPC;

        list = &mm->free_list[order];
        if (list_empty(list))
                return -ENOSPC;

        list_for_each_entry_reverse(block, list, link) {
                /* Allocate blocks traversing RHS */
                rhs_offset = drm_buddy_block_offset(block);
                err = __drm_buddy_alloc_range(mm, rhs_offset, size,
                                              &filled, blocks);
                if (!err || err != -ENOSPC)
                        return err;

                lhs_size = max((size - filled), min_block_size);
                if (!IS_ALIGNED(lhs_size, min_block_size))
                        lhs_size = round_up(lhs_size, min_block_size);

                /* Allocate blocks traversing LHS */
                lhs_offset = drm_buddy_block_offset(block) - lhs_size;
                err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
                                              NULL, &blocks_lhs);
                if (!err) {
                        list_splice(&blocks_lhs, blocks);
                        return 0;
                } else if (err != -ENOSPC) {
                        drm_buddy_free_list_internal(mm, blocks);
                        return err;
                }
                /* Free blocks for the next iteration */
                drm_buddy_free_list_internal(mm, blocks);
        }

        return -ENOSPC;
}

/**
 * drm_buddy_block_trim - free unused pages
 *
 * @mm: DRM buddy manager
 * @new_size: original size requested
 * @blocks: Input and output list of allocated blocks.
 * MUST contain a single block as input to be trimmed.
 * On success will contain the newly allocated blocks
 * making up the @new_size. Blocks always appear in
 * ascending order.
 *
 * For contiguous allocation, we round up the size to the nearest
 * power-of-two value; drivers consume the *actual* size, so the
 * remaining portions are unused and can optionally be freed with
 * this function.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_block_trim(struct drm_buddy *mm,
                         u64 new_size,
                         struct list_head *blocks)
{
        struct drm_buddy_block *parent;
        struct drm_buddy_block *block;
        LIST_HEAD(dfs);
        u64 new_start;
        int err;

        if (!list_is_singular(blocks))
                return -EINVAL;

        block = list_first_entry(blocks,
                                 struct drm_buddy_block,
                                 link);

        if (WARN_ON(!drm_buddy_block_is_allocated(block)))
                return -EINVAL;

        if (new_size > drm_buddy_block_size(mm, block))
                return -EINVAL;

        if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
                return -EINVAL;

        if (new_size == drm_buddy_block_size(mm, block))
                return 0;

        list_del(&block->link);
        mark_free(mm, block);
        mm->avail += drm_buddy_block_size(mm, block);
        if (drm_buddy_block_is_clear(block))
                mm->clear_avail += drm_buddy_block_size(mm, block);

        /* Prevent recursively freeing this node */
        parent = block->parent;
        block->parent = NULL;

        new_start = drm_buddy_block_offset(block);
        list_add(&block->tmp_link, &dfs);
        err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
        if (err) {
                mark_allocated(block);
                mm->avail -= drm_buddy_block_size(mm, block);
                if (drm_buddy_block_is_clear(block))
                        mm->clear_avail -= drm_buddy_block_size(mm, block);
                list_add(&block->link, blocks);
        }

        block->parent = parent;
        return err;
}
EXPORT_SYMBOL(drm_buddy_block_trim);

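/*
 * Illustrative sketch (hypothetical caller): after a contiguous
 * allocation was rounded up to a power of two, the unused tail can be
 * returned to the allocator. requested_size is assumed to be the
 * driver's original, chunk-aligned request.
 */
#if 0
static int example_trim_to_request(struct drm_buddy *mm,
                                   u64 requested_size,
                                   struct list_head *blocks)
{
        /* @blocks must hold exactly one allocated block here */
        return drm_buddy_block_trim(mm, requested_size, blocks);
}
#endif
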
static struct drm_buddy_block *
__drm_buddy_alloc_blocks(struct drm_buddy *mm,
                         u64 start, u64 end,
                         unsigned int order,
                         unsigned long flags)
{
        if (flags & DRM_BUDDY_RANGE_ALLOCATION)
                /* Allocate traversing within the range */
                return __drm_buddy_alloc_range_bias(mm, start, end,
                                                    order, flags);
        else
                /* Allocate from freelist */
                return alloc_from_freelist(mm, order, flags);
}

/**
 * drm_buddy_alloc_blocks - allocate power-of-two blocks
 *
 * @mm: DRM buddy manager to allocate from
 * @start: start of the allowed range for this block
 * @end: end of the allowed range for this block
 * @size: size of the allocation in bytes
 * @min_block_size: alignment of the allocation
 * @blocks: output list head to add allocated blocks
 * @flags: DRM_BUDDY_*_ALLOCATION flags
 *
 * alloc_range_bias() is called when there are range limitations; it
 * traverses the tree and returns the desired block.
 *
 * alloc_from_freelist() is called when *no* range restrictions
 * are enforced; it picks a block from the freelist.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
                           u64 start, u64 end, u64 size,
                           u64 min_block_size,
                           struct list_head *blocks,
                           unsigned long flags)
{
        struct drm_buddy_block *block = NULL;
        u64 original_size, original_min_size;
        unsigned int min_order, order;
        LIST_HEAD(allocated);
        unsigned long pages;
        int err;

        if (size < mm->chunk_size)
                return -EINVAL;

        if (min_block_size < mm->chunk_size)
                return -EINVAL;

        if (!is_power_of_2(min_block_size))
                return -EINVAL;

        if (!IS_ALIGNED(start | end | size, mm->chunk_size))
                return -EINVAL;

        if (end > mm->size)
                return -EINVAL;

        if (range_overflows(start, size, mm->size))
                return -EINVAL;

        /* Actual range allocation */
        if (start + size == end) {
                if (!IS_ALIGNED(start | end, min_block_size))
                        return -EINVAL;

                return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);
        }

        original_size = size;
        original_min_size = min_block_size;

        /* Round up the size to a power of two */
        if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
                size = roundup_pow_of_two(size);
                min_block_size = size;
        /* Align size value to min_block_size */
        } else if (!IS_ALIGNED(size, min_block_size)) {
                size = round_up(size, min_block_size);
        }

        pages = size >> ilog2(mm->chunk_size);
        order = fls(pages) - 1;
        min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);

        do {
                order = min(order, (unsigned int)fls(pages) - 1);
                BUG_ON(order > mm->max_order);
                BUG_ON(order < min_order);

                do {
                        block = __drm_buddy_alloc_blocks(mm, start,
                                                         end,
                                                         order,
                                                         flags);
                        if (!IS_ERR(block))
                                break;

                        if (order-- == min_order) {
                                /* Try allocation through force merge method */
                                if (mm->clear_avail &&
                                    !__force_merge(mm, start, end, min_order)) {
                                        block = __drm_buddy_alloc_blocks(mm, start,
                                                                         end,
                                                                         min_order,
                                                                         flags);
                                        if (!IS_ERR(block)) {
                                                order = min_order;
                                                break;
                                        }
                                }

                                /*
                                 * Try contiguous block allocation through
                                 * try harder method.
                                 */
                                if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
                                    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
                                        return __alloc_contig_try_harder(mm,
                                                                         original_size,
                                                                         original_min_size,
                                                                         blocks);
                                err = -ENOSPC;
                                goto err_free;
                        }
                } while (1);

                mark_allocated(block);
                mm->avail -= drm_buddy_block_size(mm, block);
                if (drm_buddy_block_is_clear(block))
                        mm->clear_avail -= drm_buddy_block_size(mm, block);
                kmemleak_update_trace(block);
                list_add_tail(&block->link, &allocated);

                pages -= BIT(order);

                if (!pages)
                        break;
        } while (1);

        /* Trim the allocated block to the required size */
        if (original_size != size) {
                struct list_head *trim_list;
                LIST_HEAD(temp);
                u64 trim_size;

                trim_list = &allocated;
                trim_size = original_size;

                if (!list_is_singular(&allocated)) {
                        block = list_last_entry(&allocated, typeof(*block), link);
                        list_move(&block->link, &temp);
                        trim_list = &temp;
                        trim_size = drm_buddy_block_size(mm, block) -
                                (size - original_size);
                }

                drm_buddy_block_trim(mm,
                                     trim_size,
                                     trim_list);

                if (!list_empty(&temp))
                        list_splice_tail(trim_list, &allocated);
        }

        list_splice_tail(&allocated, blocks);
        return 0;

err_free:
        drm_buddy_free_list_internal(mm, &allocated);
        return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);

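/*
 * Illustrative sketch (hypothetical caller): allocate a 1 MiB buffer
 * anywhere in the managed range, asking for pre-cleared memory, and
 * free it again. The example names are not part of this API; SZ_1M and
 * SZ_4K come from <linux/sizes.h>.
 */
#if 0
static int example_alloc_1m(struct drm_buddy *mm)
{
        LIST_HEAD(blocks);
        int err;

        /* No range restriction: start = 0, end = mm->size */
        err = drm_buddy_alloc_blocks(mm, 0, mm->size, SZ_1M, SZ_4K,
                                     &blocks, DRM_BUDDY_CLEAR_ALLOCATION);
        if (err)
                return err;

        /* ... use the blocks ... then hand them back as dirty */
        drm_buddy_free_list(mm, &blocks, 0);
        return 0;
}
#endif
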
/**
 * drm_buddy_block_print - print block information
 *
 * @mm: DRM buddy manager
 * @block: DRM buddy block
 * @p: DRM printer to use
 */
void drm_buddy_block_print(struct drm_buddy *mm,
                           struct drm_buddy_block *block,
                           struct drm_printer *p)
{
        u64 start = drm_buddy_block_offset(block);
        u64 size = drm_buddy_block_size(mm, block);

        drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
}
EXPORT_SYMBOL(drm_buddy_block_print);

/**
 * drm_buddy_print - print allocator state
 *
 * @mm: DRM buddy manager
 * @p: DRM printer to use
 */
void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
{
        int order;

        drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB, clear_free: %lluMiB\n",
                   mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20);

        for (order = mm->max_order; order >= 0; order--) {
                struct drm_buddy_block *block;
                u64 count = 0, free;

                list_for_each_entry(block, &mm->free_list[order], link) {
                        BUG_ON(!drm_buddy_block_is_free(block));
                        count++;
                }

                drm_printf(p, "order-%2d ", order);

                free = count * (mm->chunk_size << order);
                if (free < SZ_1M)
                        drm_printf(p, "free: %8llu KiB", free >> 10);
                else
                        drm_printf(p, "free: %8llu MiB", free >> 20);

                drm_printf(p, ", blocks: %llu\n", count);
        }
}
EXPORT_SYMBOL(drm_buddy_print);

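/*
 * Illustrative sketch (hypothetical debugfs hook): dump the allocator
 * state through a seq_file-backed DRM printer. Assumes the drm_buddy
 * was stashed in the seq_file private pointer; drm_seq_file_printer()
 * comes from <drm/drm_print.h>.
 */
#if 0
static int example_buddy_debugfs_show(struct seq_file *m, void *data)
{
        struct drm_buddy *mm = m->private;
        struct drm_printer p = drm_seq_file_printer(m);

        drm_buddy_print(mm, &p);
        return 0;
}
#endif
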
static void drm_buddy_module_exit(void)
{
        kmem_cache_destroy(slab_blocks);
}

static int __init drm_buddy_module_init(void)
{
        slab_blocks = KMEM_CACHE(drm_buddy_block, 0);
        if (!slab_blocks)
                return -ENOMEM;

        return 0;
}

module_init(drm_buddy_module_init);
module_exit(drm_buddy_module_exit);

MODULE_DESCRIPTION("DRM Buddy Allocator");
MODULE_LICENSE("Dual MIT/GPL");