 * Copyright 2011 (c) Oracle Corp.
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
 * - Pool collects recently freed pages for reuse (and hooks up to
 * - Tracks currently in-use pages
 * - Tracks whether the page is WC, UC or cached (and reverts to WB
#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#if IS_ENABLED(CONFIG_AGP)
#include <asm/set_memory.h>
#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION	4
#define FREE_ALL_PAGES		(~0U)
#define VADDR_FLAG_HUGE_POOL	1UL
#define VADDR_FLAG_UPDATED_COUNT	2UL
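/*
 * Illustrative note (added): with the common 4 KiB PAGE_SIZE and 8-byte
 * pointers, NUM_PAGES_TO_ALLOC works out to 4096 / 8 = 512, i.e. one page's
 * worth of 'struct page *' pointers is handled per allocation/free batch.
 */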
 * The pool structure. There are up to nine pools:
 * - generic (not restricted to DMA32):
 *	- write combined, uncached, cached.
 * - dma32 (up to 2^32 - so up to 4GB):
 *	- write combined, uncached, cached.
 * - huge (not restricted to DMA32):
 *	- write combined, uncached, cached.
 * for each 'struct device'. The 'cached' is for pages that are actively used.
 * The other ones can be shrunk by the shrinker API if necessary.
 * @pools: The 'struct device->dma_pools' link.
 * @type: Type of the pool
 * @lock: Protects the free_list from concurrent access. Must be
 * used with irqsave/irqrestore variants because the pool allocator may be called
 * @free_list: Pool of pages that are free to be used. No order requirements.
 * @dev: The device that is associated with these pools.
 * @size: Size used during DMA allocation.
 * @npages_free: Count of available pages for re-use.
 * @npages_in_use: Count of pages that are in use.
 * @nfrees: Stats when pool is shrinking.
 * @nrefills: Stats when the pool is grown.
 * @gfp_flags: Flags to pass for alloc_page.
 * @name: Name of the pool.
 * @dev_name: Name derived from dev - similar to how dev_info works.
 *   Used during shutdown as the dev_info during release is unavailable.
	struct list_head pools; /* The 'struct device->dma_pools' link */
	struct list_head free_list;
	unsigned npages_free;
	unsigned npages_in_use;
	unsigned long nfrees; /* Stats when shrunk. */
	unsigned long nrefills; /* Stats when grown. */
	char name[13]; /* "cached dma32" */
	char dev_name[64]; /* Constructed from dev */
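	/*
	 * Illustrative note (added): name[] is sized for the longest composed
	 * pool name. ttm_dma_pool_init() below concatenates the matching type
	 * strings ("wc", "uc", "cached", " dma32", "huge"), so an
	 * IS_CACHED | IS_DMA32 pool is named "cached dma32" - 12 characters
	 * plus the terminating NUL, hence name[13].
	 */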
 * The accounting page that keeps track of the allocated page along with
 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
 * @vaddr: The virtual address of the page and a flag if the page belongs to a
 * @dma: The bus address of the page. If the page is not allocated
 *   via the DMA API, it will be -1.
	struct list_head page_list;
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in sysfs store. They won't have an immediate effect
 * anyway so forcing serialization to access them is pointless.
struct ttm_pool_opts {
 * Contains the list of all of the 'struct device' and their corresponding
 * DMA pools. Guarded by _manager->lock.
 * @pools: The link to 'struct ttm_pool_manager->pools'
 * @dev: The 'struct device' associated with the 'pool'
 * @pool: The 'struct dma_pool' associated with the 'dev'
struct device_pools {
	struct list_head pools;
	struct dma_pool *pool;
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 * @lock: Lock used when adding/removing from pools
 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
 * @options: Limits for the pool.
 * @npools: Total number of pools in existence.
 * @mm_shrink: The structure used by [un|]register_shrinker
struct ttm_pool_manager {
	struct list_head pools;
	struct ttm_pool_opts options;
	struct shrinker mm_shrink;
static struct ttm_pool_manager *_manager;
static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
static void ttm_pool_kobj_release(struct kobject *kobj)
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	chars = sscanf(buffer, "%u", &val);
	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);
	if (attr == &ttm_page_pool_max) {
		m->options.max_size = val;
	} else if (attr == &ttm_page_pool_small) {
		m->options.small = val;
	} else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		m->options.alloc_size = val;
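	/*
	 * Worked example (added for illustration): the sysfs files take KiB.
	 * With 4 KiB pages, PAGE_SIZE >> 10 == 4, so writing "16384" to
	 * pool_max_size stores 16384 / 4 = 4096 pages, and ttm_pool_show()
	 * below multiplies by the same factor to report KiB again.
	 */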
static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;
	val = val * (PAGE_SIZE >> 10);
	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
static int set_pages_array_wb(struct page **pages, int addrinarray)
#if IS_ENABLED(CONFIG_AGP)
	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
static int set_pages_array_wc(struct page **pages, int addrinarray)
#if IS_ENABLED(CONFIG_AGP)
	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
static int set_pages_array_uc(struct page **pages, int addrinarray)
#if IS_ENABLED(CONFIG_AGP)
	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif /* for !CONFIG_X86 */
static int ttm_set_pages_caching(struct dma_pool *pool,
				 struct page **pages, unsigned cpages)
	/* Set page caching */
	if (pool->type & IS_UC) {
		r = set_pages_array_uc(pages, cpages);
			pr_err("%s: Failed to set %d pages to uc!\n",
			       pool->dev_name, cpages);
	if (pool->type & IS_WC) {
		r = set_pages_array_wc(pages, cpages);
			pr_err("%s: Failed to set %d pages to wc!\n",
			       pool->dev_name, cpages);
static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
	dma_addr_t dma = d_page->dma;
	d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
	dma_free_coherent(pool->dev, pool->size, (void *)d_page->vaddr, dma);
static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
	struct dma_page *d_page;
	unsigned long attrs = 0;
	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
	if (pool->type & IS_HUGE)
		attrs = DMA_ATTR_NO_WARN;
	vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma,
				pool->gfp_flags, attrs);
	if (is_vmalloc_addr(vaddr))
		d_page->p = vmalloc_to_page(vaddr);
		d_page->p = virt_to_page(vaddr);
	d_page->vaddr = (unsigned long)vaddr;
	if (pool->type & IS_HUGE)
		d_page->vaddr |= VADDR_FLAG_HUGE_POOL;
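	/*
	 * Illustrative note (added): dma_alloc_attrs() returns a page-aligned
	 * virtual address, so the two low bits of d_page->vaddr are always
	 * clear and can carry VADDR_FLAG_HUGE_POOL and
	 * VADDR_FLAG_UPDATED_COUNT; the flags are cleared again
	 * (VADDR_FLAG_UPDATED_COUNT in ttm_dma_unpopulate(),
	 * VADDR_FLAG_HUGE_POOL in __ttm_dma_free_page()) before the address
	 * is passed back to the DMA API.
	 */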
static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
	enum pool_type type = IS_UNDEFINED;
	if (flags & TTM_PAGE_FLAG_DMA32)
	if (cstate == tt_cached)
	else if (cstate == tt_uncached)
static void ttm_pool_update_free_locked(struct dma_pool *pool,
					unsigned freed_pages)
	pool->npages_free -= freed_pages;
	pool->nfrees += freed_pages;
/* set memory back to wb and free the pages. */
static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
	struct page *page = d_page->p;
	unsigned i, num_pages;
	/* Don't set WB on WB page pool. */
	if (!(pool->type & IS_CACHED)) {
		num_pages = pool->size / PAGE_SIZE;
		for (i = 0; i < num_pages; ++i, ++page) {
			if (set_pages_array_wb(&page, 1)) {
				pr_err("%s: Failed to set %d pages to wb!\n",
	list_del(&d_page->page_list);
	__ttm_dma_free_page(pool, d_page);
static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
			      struct page *pages[], unsigned npages)
	struct dma_page *d_page, *tmp;
	if (pool->type & IS_HUGE) {
		list_for_each_entry_safe(d_page, tmp, d_pages, page_list)
			ttm_dma_page_put(pool, d_page);
	/* Don't set WB on WB page pool. */
	if (npages && !(pool->type & IS_CACHED) &&
	    set_pages_array_wb(pages, npages))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, npages);
	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
 * Free pages from the pool.
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages in one go.
 * @pool: to free the pages from
 * @nr_free: number of pages to free; FREE_ALL_PAGES frees the whole pool
 * @use_static: safe to use the static buffer
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
	unsigned long irq_flags;
	struct dma_page *dma_p, *tmp;
	struct page **pages_to_free;
	struct list_head d_pages;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;
	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;
		pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
			 pool->dev_name, pool->name, current->pid,
			 npages_to_free, nr_free);
		pages_to_free = static_buf;
		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
	if (!pages_to_free) {
		pr_debug("%s: Failed to allocate memory for pool free operation\n",
	INIT_LIST_HEAD(&d_pages);
	spin_lock_irqsave(&pool->lock, irq_flags);
	/* We pick the oldest ones off the list */
	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
		if (freed_pages >= npages_to_free)
		/* Move the dma_page from one list to another. */
		list_move(&dma_p->page_list, &d_pages);
		pages_to_free[freed_pages++] = dma_p->p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			ttm_pool_update_free_locked(pool, freed_pages);
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			spin_unlock_irqrestore(&pool->lock, irq_flags);
			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
			INIT_LIST_HEAD(&d_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;
			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
				npages_to_free = NUM_PAGES_TO_ALLOC;
			/* free all, so restart the processing */
			/* Not allowed to fall through or break because
			 * the following context is inside the spinlock while we are
	/* remove range of pages from the pool */
		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	spin_unlock_irqrestore(&pool->lock, irq_flags);
		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
	if (pages_to_free != static_buf)
		kfree(pages_to_free);
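	/*
	 * Design note (added): callers that already hold the global
	 * _manager->lock pass use_static == true, so this function reuses the
	 * single static_buf[] above instead of allocating a temporary array;
	 * the mutex guarantees there is only one such caller at a time.
	 */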
static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
	struct device_pools *p;
	struct dma_pool *pool;
	mutex_lock(&_manager->lock);
	list_for_each_entry_reverse(p, &_manager->pools, pools) {
		if (pool->type != type)
	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
		if (pool->type != type)
		/* Takes a spinlock.. */
		/* OK to use the static buffer since the global mutex is held. */
		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
		/* This code path is called after _all_ references to the
		 * struct device have been dropped - so nobody should be
		 * touching it. In case somebody is trying to _add_ we are
		 * guarded by the mutex. */
		list_del(&pool->pools);
	mutex_unlock(&_manager->lock);
 * When the 'struct device' is freed this destructor runs, although the
 * pool might already have been freed earlier.
static void ttm_dma_pool_release(struct device *dev, void *res)
	struct dma_pool *pool = *(struct dma_pool **)res;
		ttm_dma_free_pool(dev, pool->type);
static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
	return *(struct dma_pool **)res == match_data;
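/*
 * Illustrative note (added): the pool is tied to its device through the
 * devres API - ttm_dma_pool_init() below does roughly
 *
 *	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
 *	*ptr = pool;
 *	devres_add(dev, ptr);
 *
 * so ttm_dma_pool_release() runs automatically when the device goes away,
 * and ttm_dma_page_alloc_fini() can drop a pool early with
 * devres_destroy(dev, ttm_dma_pool_release, ttm_dma_pool_match, pool).
 */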
static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
	const char *n[] = {"wc", "uc", "cached", " dma32", "huge"};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE};
	struct device_pools *sec_pool = NULL;
	struct dma_pool *pool = NULL, **ptr;
	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
	INIT_LIST_HEAD(&sec_pool->pools);
	sec_pool->pool = pool;
	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->pools);
	spin_lock_init(&pool->lock);
	pool->npages_free = pool->npages_in_use = 0;
	pool->gfp_flags = flags;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		pool->size = HPAGE_PMD_SIZE;
		pool->size = PAGE_SIZE;
	for (i = 0; i < ARRAY_SIZE(t); i++) {
			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
	/* We copy the name for pr_ calls because when dma_pool_destroy is called
	 * the kobj->name has already been deallocated. */
	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
		 dev_driver_string(dev), dev_name(dev));
	mutex_lock(&_manager->lock);
	/* You can get the dma_pool from either the global: */
	list_add(&sec_pool->pools, &_manager->pools);
	/* or from 'struct device': */
	list_add(&pool->pools, &dev->dma_pools);
	mutex_unlock(&_manager->lock);
	devres_add(dev, ptr);
static struct dma_pool *ttm_dma_find_pool(struct device *dev,
	struct dma_pool *pool, *tmp;
	if (type == IS_UNDEFINED)
	/* NB: We iterate on the 'struct dev' which has no spinlock, but
	 * it does have a kref which we have taken. The kref is taken during
	 * graphic driver loading - in the drm_pci_init it calls either
	 * pci_dev_get or pci_register_driver which both end up taking a kref
	 * on 'struct device'.
	 * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
	 * and call the dev_res destructors: ttm_dma_pool_release. The nice
	 * thing is that at that point in time there are no pages associated with
	 * the driver, so this function will not be called.
	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools)
		if (pool->type == type)
 * Free the pages that failed to change the caching state. If there
 * are pages that have changed their caching state already put them to the
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
						 struct list_head *d_pages,
						 struct page **failed_pages,
	struct dma_page *d_page, *tmp;
	/* Find the failed page. */
	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		/* .. and then progress over the full list. */
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
 * Allocate 'count' pages, and put 'need' number of them on the
 * 'pages' list as well as on the 'dma_address' list, starting at the
 * 'dma_offset' offset. The full list of pages should also be on 'd_pages'.
 * We return zero for success, and negative numbers as errors.
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
					struct list_head *d_pages,
	struct page **caching_array;
	struct dma_page *dma_p;
	unsigned i, j, npages, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
	if (!caching_array) {
		pr_debug("%s: Unable to allocate table for new pages\n",
		pr_debug("%s: (%s:%d) Getting %d pages\n",
			 pool->dev_name, pool->name, current->pid, count);
	for (i = 0, cpages = 0; i < count; ++i) {
		dma_p = __ttm_dma_alloc_page(pool);
			pr_debug("%s: Unable to get page %u\n",
			/* store already allocated pages in the pool after
			 * setting the caching state */
				r = ttm_set_pages_caching(pool, caching_array,
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
		list_add(&dma_p->page_list, d_pages);
#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such a case
		npages = pool->size / PAGE_SIZE;
		for (j = 0; j < npages; ++j) {
			caching_array[cpages++] = p + j;
			if (cpages == max_cpages) {
				/* Note: Cannot hold the spinlock */
				r = ttm_set_pages_caching(pool, caching_array,
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
		r = ttm_set_pages_caching(pool, caching_array, cpages);
			ttm_dma_handle_caching_state_failure(pool, d_pages,
					caching_array, cpages);
	kfree(caching_array);
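	/*
	 * Illustrative note (added): for a huge pool, pool->size is
	 * HPAGE_PMD_SIZE, so with 4 KiB base pages and 2 MiB huge pages the
	 * loop above queues pool->size / PAGE_SIZE = 512 'struct page'
	 * entries per allocation into caching_array, changing their caching
	 * attributes in batches of at most max_cpages.
	 */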
 * @return count of pages still required to fulfill the request.
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
					 unsigned long *irq_flags)
	unsigned count = _manager->options.small;
	int r = pool->npages_free;
	if (count > pool->npages_free) {
		struct list_head d_pages;
		INIT_LIST_HEAD(&d_pages);
		spin_unlock_irqrestore(&pool->lock, *irq_flags);
		/* Returns how many more are necessary to fulfill the
		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
		spin_lock_irqsave(&pool->lock, *irq_flags);
			/* Add the fresh pages to the end. */
			list_splice(&d_pages, &pool->free_list);
			pool->npages_free += count;
			struct dma_page *d_page;
			pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
				 pool->dev_name, pool->name, r);
			list_for_each_entry(d_page, &d_pages, page_list) {
			list_splice_tail(&d_pages, &pool->free_list);
			pool->npages_free += cpages;
 * The populate list is actually a stack (not that it matters, as TTM
 * allocates one page at a time).
 * Returns a dma_page pointer on success, otherwise NULL.
static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
					       struct ttm_dma_tt *ttm_dma,
	struct dma_page *d_page = NULL;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long irq_flags;
	spin_lock_irqsave(&pool->lock, irq_flags);
	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
		ttm->pages[index] = d_page->p;
		ttm_dma->dma_address[index] = d_page->dma;
		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
		pool->npages_in_use += 1;
		pool->npages_free -= 1;
	spin_unlock_irqrestore(&pool->lock, irq_flags);
static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
	struct ttm_tt *ttm = &ttm_dma->ttm;
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;
		gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
			     __GFP_KSWAPD_RECLAIM;
		gfp_flags &= ~__GFP_MOVABLE;
		gfp_flags &= ~__GFP_COMP;
	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
		gfp_flags |= __GFP_RETRY_MAYFAIL;
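	/*
	 * Worked example (added): for a DMA32, zero-on-alloc, non-huge TTM
	 * this returns GFP_USER | GFP_DMA32 | __GFP_ZERO; the huge path
	 * additionally ORs in GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
	 * __GFP_KSWAPD_RECLAIM and masks out __GFP_MOVABLE and __GFP_COMP.
	 */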
 * On success the pages list will hold count number of correctly
 * cached pages. On failure it will hold the negative return value (-ENOMEM, etc.).
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
		     struct ttm_operation_ctx *ctx)
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
	unsigned long num_pages = ttm->num_pages;
	struct dma_pool *pool;
	struct dma_page *d_page;
	if (ttm->state != tt_unpopulated)
	if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
	INIT_LIST_HEAD(&ttm_dma->pages_list);
	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);
		pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
		if (IS_ERR_OR_NULL(pool))
	while (num_pages >= HPAGE_PMD_NR) {
		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
		/* The huge allocation is contiguous: derive the remaining
		 * page pointers and DMA addresses from the first entry. */
		for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
			ttm->pages[j] = ttm->pages[j - 1] + 1;
			ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
		num_pages -= HPAGE_PMD_NR;
	pool = ttm_dma_find_pool(dev, type);
		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);
		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool))
		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
			ttm_dma_unpopulate(ttm_dma, dev);
		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
	ttm->state = tt_unbound;
EXPORT_SYMBOL_GPL(ttm_dma_populate);
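/*
 * Usage sketch (added for illustration, not part of the original file):
 * a driver's ttm_tt backend would typically wire these helpers into its
 * populate/unpopulate callbacks, roughly like:
 *
 *	static int my_ttm_populate(struct ttm_tt *ttm,
 *				   struct ttm_operation_ctx *ctx)
 *	{
 *		struct ttm_dma_tt *dma_tt =
 *			container_of(ttm, struct ttm_dma_tt, ttm);
 *
 *		return ttm_dma_populate(dma_tt, my_drm_device->dev, ctx);
 *	}
 *
 *	static void my_ttm_unpopulate(struct ttm_tt *ttm)
 *	{
 *		struct ttm_dma_tt *dma_tt =
 *			container_of(ttm, struct ttm_dma_tt, ttm);
 *
 *		ttm_dma_unpopulate(dma_tt, my_drm_device->dev);
 *	}
 *
 * my_ttm_populate, my_ttm_unpopulate and my_drm_device are hypothetical
 * driver-side names; only ttm_dma_populate() and ttm_dma_unpopulate() come
 * from this file.
 */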
/* Put all pages in the pages list back into the correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
	struct dma_pool *pool;
	struct dma_page *d_page, *next;
	enum pool_type type;
	bool is_cached = false;
	unsigned count, i, npages = 0;
	unsigned long irq_flags;
	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
			if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
			if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
				ttm_mem_global_free_page(mem_glob, d_page->p,
				d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
			ttm_dma_page_put(pool, d_page);
		spin_lock_irqsave(&pool->lock, irq_flags);
		pool->npages_in_use -= count;
		pool->nfrees += count;
		spin_unlock_irqrestore(&pool->lock, irq_flags);
	pool = ttm_dma_find_pool(dev, type);
	is_cached = (ttm_dma_find_pool(pool->dev,
		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);
	/* make sure the pages array matches the list and count the number of pages */
	list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
		ttm->pages[count] = d_page->p;
		if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
			ttm_mem_global_free_page(mem_glob, d_page->p,
			d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
			ttm_dma_page_put(pool, d_page);
	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
		pool->nfrees += count;
		pool->npages_free += count;
		list_splice(&ttm_dma->pages_list, &pool->free_list);
		 * Wait to have at least NUM_PAGES_TO_ALLOC pages
		 * to free in order to minimize calls to set_memory_wb().
		if (pool->npages_free >= (_manager->options.max_size +
					  NUM_PAGES_TO_ALLOC))
			npages = pool->npages_free - _manager->options.max_size;
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->pages[i] = NULL;
		ttm_dma->dma_address[i] = 0;
	/* shrink pool if necessary (only on !is_cached pools) */
		ttm_dma_page_pool_free(pool, npages, false);
	ttm->state = tt_unpopulated;
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
 * Callback for mm to request the pool to reduce the number of pages held.
 * XXX: (dchinner) Deadlock warning!
 * I'm getting sadder as I hear more pathetic whimpers about needing per-pool
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
	static unsigned start_pool;
	unsigned pool_offset;
	unsigned shrink_pages = sc->nr_to_scan;
	struct device_pools *p;
	unsigned long freed = 0;
	if (list_empty(&_manager->pools))
	if (!mutex_trylock(&_manager->lock))
	if (!_manager->npools)
	pool_offset = ++start_pool % _manager->npools;
	list_for_each_entry(p, &_manager->pools, pools) {
		if (shrink_pages == 0)
		/* Do it in round-robin fashion. */
		if (++idx < pool_offset)
		nr_free = shrink_pages;
		/* OK to use the static buffer since the global mutex is held. */
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
		freed += nr_free - shrink_pages;
		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
			 p->pool->dev_name, p->pool->name, current->pid,
			 nr_free, shrink_pages);
	mutex_unlock(&_manager->lock);
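	/*
	 * Illustrative note (added): start_pool is static, so each scan call
	 * begins one pool further along the list; with three pools, for
	 * example, successive calls start at offsets 1, 2, 0, 1, ... which
	 * spreads the shrinking pressure round-robin across devices.
	 */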
static unsigned long
ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
	struct device_pools *p;
	unsigned long count = 0;
	if (!mutex_trylock(&_manager->lock))
	list_for_each_entry(p, &_manager->pools, pools)
		count += p->pool->npages_free;
	mutex_unlock(&_manager->lock);
static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
	manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
	manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	return register_shrinker(&manager->mm_shrink);
static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
	unregister_shrinker(&manager->mm_shrink);
int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
	pr_info("Initializing DMA pool allocator\n");
	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	mutex_init(&_manager->lock);
	INIT_LIST_HEAD(&_manager->pools);
	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
	/* This takes care of auto-freeing the _manager */
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "dma_pool");
	if (unlikely(ret != 0))
	ret = ttm_dma_pool_mm_shrink_init(_manager);
	if (unlikely(ret != 0))
	kobject_put(&_manager->kobj);
void ttm_dma_page_alloc_fini(void)
	struct device_pools *p, *t;
	pr_info("Finalizing DMA pool allocator\n");
	ttm_dma_pool_mm_shrink_fini(_manager);
	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
				       ttm_dma_pool_match, p->pool));
		ttm_dma_free_pool(p->dev, p->pool->type);
	kobject_put(&_manager->kobj);
int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
	struct device_pools *p;
	struct dma_pool *pool = NULL;
		seq_printf(m, "No pool allocator running.\n");
	seq_printf(m, "         pool      refills   pages freed    inuse available     name\n");
	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools) {
		struct device *dev = p->dev;
		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
			   pool->name, pool->nrefills,
			   pool->nfrees, pool->npages_in_use,
	mutex_unlock(&_manager->lock);
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);