// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
/* Pooling of allocated pages is necessary because changing the caching
 * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
 * invalidate for those addresses.
 *
 * In addition, allocations from the DMA coherent API are pooled as well
 * because they are rather slow compared to alloc_pages+map.
 */
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_bo.h>

#include "ttm_module.h"
/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping and order in the lower bits
 */
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
};
static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);
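
/*
 * Example (illustrative, assuming the parameter is exposed by the ttm
 * module): the limit can typically be set with "ttm.page_pool_size=262144"
 * on the kernel command line or adjusted at runtime through
 * /sys/module/ttm/parameters/page_pool_size. The value is a number of
 * pages, not bytes.
 */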
static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];

static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker *mm_shrinker;
static DECLARE_RWSEM(pool_shrink_rwsem);
/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;

	/* Don't set the __GFP_COMP flag for higher order allocations.
	 * Mapping pages directly into a userspace process and calling
	 * put_page() on a TTM allocated page is illegal.
	 */
	if (order)
		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
			__GFP_KSWAPD_RECLAIM;

	if (!pool->use_dma_alloc) {
		p = alloc_pages_node(pool->nid, gfp_flags, order);
		if (p)
			p->private = order;
		return p;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;

	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);

	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}
/* Reset the caching and free pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	if (!pool || !pool->use_dma_alloc) {
		__free_pages(p, order);
		return;
	}

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}
/* Apply a new caching to an array of pages */
static int ttm_pool_apply_caching(struct page **first, struct page **last,
				  enum ttm_caching caching)
{
#ifdef CONFIG_X86
	unsigned int num_pages = last - first;

	if (!num_pages)
		return 0;

	switch (caching) {
	case ttm_cached:
		break;
	case ttm_write_combined:
		return set_pages_array_wc(first, num_pages);
	case ttm_uncached:
		return set_pages_array_uc(first, num_pages);
	}
#endif
	return 0;
}
/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct page *p, dma_addr_t **dma_addr)
{
	dma_addr_t addr;
	unsigned int i;

	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;

		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, addr))
			return -EFAULT;
	}

	for (i = 1 << order; i ; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}
/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (pool->use_dma_alloc)
		return;

	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}
/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
	unsigned int i, num_pages = 1 << pt->order;

	for (i = 0; i < num_pages; ++i) {
		if (PageHighMem(p))
			clear_highpage(p + i);
		else
			clear_page(page_address(p + i));
	}

	spin_lock(&pt->lock);
	list_add(&p->lru, &pt->pages);
	spin_unlock(&pt->lock);
	atomic_long_add(1 << pt->order, &allocated_pages);
}
/* Take pages from a specific pool_type, return NULL when nothing available */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&pt->lock);
	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
	if (p) {
		atomic_long_sub(1 << pt->order, &allocated_pages);
		list_del(&p->lru);
	}
	spin_unlock(&pt->lock);

	return p;
}
/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool = pool;
	pt->caching = caching;
	pt->order = order;
	spin_lock_init(&pt->lock);
	INIT_LIST_HEAD(&pt->pages);

	spin_lock(&shrinker_lock);
	list_add_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);
}
/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&shrinker_lock);
	list_del(&pt->shrinker_list);
	spin_unlock(&shrinker_lock);

	while ((p = ttm_pool_type_take(pt)))
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}
/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	if (pool->use_dma_alloc)
		return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
	switch (caching) {
	case ttm_write_combined:
		if (pool->nid != NUMA_NO_NODE)
			return &pool->caching[caching].orders[order];

		if (pool->use_dma32)
			return &global_dma32_write_combined[order];

		return &global_write_combined[order];
	case ttm_uncached:
		if (pool->nid != NUMA_NO_NODE)
			return &pool->caching[caching].orders[order];

		if (pool->use_dma32)
			return &global_dma32_uncached[order];

		return &global_uncached[order];
	default:
		break;
	}
#endif

	return NULL;
}
/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	unsigned int num_pages;
	struct page *p;

	down_read(&pool_shrink_rwsem);
	spin_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
	list_move_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);

	p = ttm_pool_type_take(pt);
	if (p) {
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_pages = 1 << pt->order;
	} else {
		num_pages = 0;
	}
	up_read(&pool_shrink_rwsem);

	return num_pages;
}
/* Return the allocation order of a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		return dma->vaddr & ~PAGE_MASK;
	}

	return p->private;
}
/* Called when we got a page, either from a pool or newly allocated */
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
				   struct page *p, dma_addr_t **dma_addr,
				   unsigned long *num_pages,
				   struct page ***pages)
{
	unsigned int i;
	int r;

	if (*dma_addr) {
		r = ttm_pool_map(pool, order, p, dma_addr);
		if (r)
			return r;
	}

	*num_pages -= 1 << order;
	for (i = 1 << order; i; --i, ++(*pages), ++p)
		**pages = p;

	return 0;
}
/**
 * ttm_pool_free_range() - Free a range of TTM pages
 * @pool: The pool used for allocating.
 * @tt: The struct ttm_tt holding the page pointers.
 * @caching: The page caching mode used by the range.
 * @start_page: index for first page to free.
 * @end_page: index for last page to free + 1.
 *
 * During allocation the ttm_tt page-vector may be populated with ranges of
 * pages with different attributes if the allocation hit an error without
 * being able to completely fulfill the allocation. This function can be used
 * to free these individual ranges.
 */
static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
				enum ttm_caching caching,
				pgoff_t start_page, pgoff_t end_page)
{
	struct page **pages = &tt->pages[start_page];
	unsigned int order;
	pgoff_t i, nr;

	for (i = start_page; i < end_page; i += nr, pages += nr) {
		struct ttm_pool_type *pt = NULL;

		order = ttm_pool_page_order(pool, *pages);
		nr = (1UL << order);
		if (tt->dma_address)
			ttm_pool_unmap(pool, tt->dma_address[i], nr);

		pt = ttm_pool_select_type(pool, caching, order);
		if (pt)
			ttm_pool_type_give(pt, *pages);
		else
			ttm_pool_free_page(pool, caching, order, *pages);
	}
}
/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	pgoff_t num_pages = tt->num_pages;
	dma_addr_t *dma_addr = tt->dma_address;
	struct page **caching = tt->pages;
	struct page **pages = tt->pages;
	enum ttm_caching page_caching;
	gfp_t gfp_flags = GFP_USER;
	pgoff_t caching_divide;
	unsigned int order;
	struct page *p;
	int r;

	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
	WARN_ON(dma_addr && !pool->dev);

	if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (ctx->gfp_retry_mayfail)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	if (pool->use_dma32)
		gfp_flags |= GFP_DMA32;
	else
		gfp_flags |= GFP_HIGHUSER;

	for (order = min_t(unsigned int, MAX_PAGE_ORDER, __fls(num_pages));
	     num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		struct ttm_pool_type *pt;

		page_caching = tt->caching;
		pt = ttm_pool_select_type(pool, tt->caching, order);
		p = pt ? ttm_pool_type_take(pt) : NULL;
		if (p) {
			r = ttm_pool_apply_caching(caching, pages,
						   tt->caching);
			if (r)
				goto error_free_page;

			caching = pages;
			do {
				r = ttm_pool_page_allocated(pool, order, p,
							    &dma_addr,
							    &num_pages,
							    &pages);
				if (r)
					goto error_free_page;

				caching = pages;
				if (num_pages < (1 << order))
					break;

				p = ttm_pool_type_take(pt);
			} while (p);
		}

		page_caching = ttm_cached;
		while (num_pages >= (1 << order) &&
		       (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {

			if (PageHighMem(p)) {
				r = ttm_pool_apply_caching(caching, pages,
							   tt->caching);
				if (r)
					goto error_free_page;
				caching = pages;
			}
			r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
						    &num_pages, &pages);
			if (r)
				goto error_free_page;
			if (PageHighMem(p))
				caching = pages;
		}

		if (!p) {
			if (order) {
				--order;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}
	}

	r = ttm_pool_apply_caching(caching, pages, tt->caching);
	if (r)
		goto error_free_all;

	return 0;

error_free_page:
	ttm_pool_free_page(pool, page_caching, order, p);

error_free_all:
	num_pages = tt->num_pages - num_pages;
	caching_divide = caching - tt->pages;
	ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
	ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);

	return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);
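
/*
 * Example (illustrative sketch, not taken from a specific driver): the usual
 * caller is a driver's ttm_tt populate hook, which simply forwards to
 * ttm_pool_alloc() with the pool embedded in its ttm_device. The "foo_"
 * names below are made up:
 *
 *	static int foo_ttm_tt_populate(struct ttm_device *bdev,
 *				       struct ttm_tt *tt,
 *				       struct ttm_operation_ctx *ctx)
 *	{
 *		return ttm_pool_alloc(&bdev->pool, tt, ctx);
 *	}
 */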
/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);

	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);
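
/*
 * Example (illustrative sketch, continuing the made-up "foo_" driver above):
 * the matching unpopulate hook hands the backing pages back to the pool:
 *
 *	static void foo_ttm_tt_unpopulate(struct ttm_device *bdev,
 *					  struct ttm_tt *tt)
 *	{
 *		ttm_pool_free(&bdev->pool, tt);
 *	}
 */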
/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @nid: NUMA node to use for allocations
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   int nid, bool use_dma_alloc, bool use_dma32)
{
	unsigned int i, j;

	WARN_ON(!dev && use_dma_alloc);

	pool->dev = dev;
	pool->nid = nid;
	pool->use_dma_alloc = use_dma_alloc;
	pool->use_dma32 = use_dma32;

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		for (j = 0; j < NR_PAGE_ORDERS; ++j) {
			struct ttm_pool_type *pt;

			/* Initialize only pool types which are actually used */
			pt = ttm_pool_select_type(pool, i, j);
			if (pt != &pool->caching[i].orders[j])
				continue;

			ttm_pool_type_init(pt, pool, i, j);
		}
	}
}
EXPORT_SYMBOL(ttm_pool_init);
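
/*
 * Example (illustrative sketch): a driver that needs its own pool, e.g. with
 * coherent DMA allocations enabled, could set it up and tear it down like
 * this; most drivers instead use the pool that ttm_device_init() embeds in
 * their struct ttm_device:
 *
 *	struct ttm_pool pool;
 *
 *	ttm_pool_init(&pool, dev, NUMA_NO_NODE, true, false);
 *	...
 *	ttm_pool_fini(&pool);
 */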
/**
 * ttm_pool_synchronize_shrinkers - Wait for all running shrinkers to complete.
 *
 * This is useful to guarantee that all shrinker invocations have seen an
 * update, before freeing memory, similar to rcu.
 */
static void ttm_pool_synchronize_shrinkers(void)
{
	down_write(&pool_shrink_rwsem);
	up_write(&pool_shrink_rwsem);
}
/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		for (j = 0; j < NR_PAGE_ORDERS; ++j) {
			struct ttm_pool_type *pt;

			pt = ttm_pool_select_type(pool, i, j);
			if (pt != &pool->caching[i].orders[j])
				continue;

			ttm_pool_type_fini(pt);
		}
	}

	/* We removed the pool types from the LRU, but we need to also make sure
	 * that no shrinker is concurrently freeing pages from the pool.
	 */
	ttm_pool_synchronize_shrinkers();
}
EXPORT_SYMBOL(ttm_pool_fini);
/* As long as pages are available make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	do
		num_freed += ttm_pool_shrink();
	while (!num_freed && atomic_long_read(&allocated_pages));

	return num_freed;
}
/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : SHRINK_EMPTY;
}
#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int count = 0;
	struct page *p;

	spin_lock(&pt->lock);
	/* Only used for debugfs, the overhead doesn't matter */
	list_for_each_entry(p, &pt->pages, lru)
		++count;
	spin_unlock(&pt->lock);

	return count;
}
/* Print a nice header for the order */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
	unsigned int i;

	seq_puts(m, "\t ");
	for (i = 0; i < NR_PAGE_ORDERS; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");
}
/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;

	for (i = 0; i < NR_PAGE_ORDERS; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}
/* Dump the total amount of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);
}
/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);
	seq_puts(m, "wc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
	seq_puts(m, "uc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_uncached, m);
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);
/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;

	if (!pool->use_dma_alloc) {
		seq_puts(m, "unused\n");
		return 0;
	}

	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		seq_puts(m, "DMA ");
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);
	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);
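
/*
 * Example (illustrative sketch, "foo_" names are made up): a driver can
 * expose its pool through its own debugfs file by wrapping this function in
 * a show callback:
 *
 *	static int foo_pool_show(struct seq_file *m, void *data)
 *	{
 *		struct ttm_device *bdev = m->private;
 *
 *		return ttm_pool_debugfs(&bdev->pool, m);
 *	}
 *	DEFINE_SHOW_ATTRIBUTE(foo_pool);
 */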
/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct shrink_control sc = { .gfp_mask = GFP_NOFS };

	fs_reclaim_acquire(GFP_KERNEL);
	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(mm_shrinker, &sc),
		   ttm_pool_shrinker_scan(mm_shrinker, &sc));
	fs_reclaim_release(GFP_KERNEL);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);

#endif
/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	if (!page_pool_size)
		page_pool_size = num_pages;

	spin_lock_init(&shrinker_lock);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
				   ttm_uncached, i);
	}

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_globals_fops);
	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_shrink_fops);
#endif

	mm_shrinker = shrinker_alloc(0, "drm-ttm_pool");
	if (!mm_shrinker)
		return -ENOMEM;

	mm_shrinker->count_objects = ttm_pool_shrinker_count;
	mm_shrinker->scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker->seeks = 1;

	shrinker_register(mm_shrinker);

	return 0;
}
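
/*
 * Illustrative note: this is called once from the TTM global setup code when
 * the first TTM device is initialized, with a default limit derived from the
 * amount of system memory, roughly along the lines of the (made-up) sketch
 * below:
 *
 *	struct sysinfo si;
 *
 *	si_meminfo(&si);
 *	ttm_pool_mgr_init((((u64)si.totalram * si.mem_unit) >> PAGE_SHIFT) / 2);
 */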
/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);

		ttm_pool_type_fini(&global_dma32_write_combined[i]);
		ttm_pool_type_fini(&global_dma32_uncached[i]);
	}

	shrinker_free(mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
}