// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic AMD IO page table allocator.
 *
 * Copyright (C) 2020 Advanced Micro Devices, Inc.
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "amd_iommu_types.h"
#include "amd_iommu.h"
#include "../iommu-pages.h"
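/*
 * IOTLB invalidation for the v1 page table is driven by the AMD IOMMU
 * driver itself (see the amd_iommu_domain_flush_pages() call in the map
 * path below), so the io-pgtable flush callbacks are no-op stubs.
 */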
static void v1_tlb_flush_all(void *cookie)
{
}

static void v1_tlb_flush_walk(unsigned long iova, size_t size,
			      size_t granule, void *cookie)
{
}

static void v1_tlb_add_page(struct iommu_iotlb_gather *gather,
			    unsigned long iova, size_t granule,
			    void *cookie)
{
}

static const struct iommu_flush_ops v1_flush_ops = {
	.tlb_flush_all	= v1_tlb_flush_all,
	.tlb_flush_walk	= v1_tlb_flush_walk,
	.tlb_add_page	= v1_tlb_add_page,
};
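/*
 * In the v1 (host-like) page-table format, a mapping whose size is not a
 * native page-table level size is encoded as a "level 7" PTE and written
 * as a power-of-two run of identical, contiguous PTEs. The helper below
 * finds the first PTE of such a run.
 */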
/*
 * Helper function to get the first pte of a large mapping
 */
static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
			 unsigned long *count)
{
	unsigned long pte_mask, pg_size, cnt;
	u64 *fpte;

	pg_size  = PTE_PAGE_SIZE(*pte);
	cnt      = PAGE_SIZE_PTE_COUNT(pg_size);
	pte_mask = ~((cnt << 3) - 1);
	fpte     = (u64 *)(((unsigned long)pte) & pte_mask);

	if (page_size)
		*page_size = pg_size;

	if (count)
		*count = cnt;

	return fpte;
}
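/*
 * Illustrative example (not part of the original source): a 32 KiB mapping
 * is emitted as a run of 8 replicated 4 KiB PTEs, so cnt == 8 and
 * pte_mask == ~0x3fUL. Since each PTE is 8 bytes ("cnt << 3"), masking the
 * PTE pointer rounds it down to the 64-byte-aligned first entry of the run.
 */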
/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/
static void free_pt_page(u64 *pt, struct list_head *freelist)
{
	struct page *p = virt_to_page(pt);

	list_add_tail(&p->lru, freelist);
}
static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
{
	u64 *p;
	int i;

	for (i = 0; i < 512; ++i) {
		/* PTE present? */
		if (!IOMMU_PTE_PRESENT(pt[i]))
			continue;

		/* Large PTE? */
		if (PM_PTE_LEVEL(pt[i]) == 0 ||
		    PM_PTE_LEVEL(pt[i]) == 7)
			continue;

		/*
		 * Free the next level. No need to look at l1 tables here since
		 * they can only contain leaf PTEs; just free them directly.
		 */
		p = IOMMU_PTE_PAGE(pt[i]);
		if (lvl > 2)
			free_pt_lvl(p, freelist, lvl - 1);
		else
			free_pt_page(p, freelist);
	}

	free_pt_page(pt, freelist);
}
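/*
 * free_sub_pt() picks the right teardown for a sub-tree based on its paging
 * mode: nothing is freed for PAGE_MODE_NONE or level-7 entries (large-page
 * runs live inside their parent table), a single-level table is freed
 * directly, and deeper trees are walked recursively via free_pt_lvl().
 */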
static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
{
	switch (mode) {
	case PAGE_MODE_NONE:
	case PAGE_MODE_7_LEVEL:
		break;
	case PAGE_MODE_1_LEVEL:
		free_pt_page(root, freelist);
		break;
	case PAGE_MODE_2_LEVEL:
	case PAGE_MODE_3_LEVEL:
	case PAGE_MODE_4_LEVEL:
	case PAGE_MODE_5_LEVEL:
	case PAGE_MODE_6_LEVEL:
		free_pt_lvl(root, freelist, mode);
		break;
	default:
		BUG();
	}
}
void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
				  u64 *root, int mode)
{
	u64 pt_root = 0;

	/* lowest 3 bits encode pgtable mode */
	pt_root = mode & 7;
	pt_root |= (u64)root;

	amd_iommu_domain_set_pt_root(domain, pt_root);
}
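/*
 * Illustrative sketch (not part of the driver): page-table pages are 4 KiB
 * aligned, so the low bits of the root pointer are free to carry the mode.
 * The packed value can be unpacked again along these lines:
 *
 *	int  mode = pt_root & 7;
 *	u64 *root = (u64 *)(pt_root & ~7ULL);
 */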
/*
 * This function is used to add another level to an IO page table. Adding
 * another level increases the size of the address space by 9 bits to a size up
 * to 64 bits.
 */
static bool increase_address_space(struct protection_domain *domain,
				   unsigned long address,
				   gfp_t gfp)
{
	unsigned long flags;
	bool ret = true;
	u64 *pte;

	pte = iommu_alloc_page_node(domain->nid, gfp);
	if (!pte)
		return false;

	spin_lock_irqsave(&domain->lock, flags);

	if (address <= PM_LEVEL_SIZE(domain->iop.mode))
		goto out;

	ret = false;
	if (WARN_ON_ONCE(domain->iop.mode == PAGE_MODE_6_LEVEL))
		goto out;

	*pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root));

	domain->iop.root  = pte;
	domain->iop.mode += 1;
	amd_iommu_update_and_flush_device_table(domain);
	amd_iommu_domain_flush_complete(domain);

	/*
	 * Device Table needs to be updated and flushed before the new root can
	 * be published.
	 */
	amd_iommu_domain_set_pgtable(domain, pte, domain->iop.mode);

	pte = NULL;
	ret = true;

out:
	spin_unlock_irqrestore(&domain->lock, flags);
	iommu_free_page(pte);

	return ret;
}
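/*
 * increase_address_space() is called in a loop from alloc_pte(), and the
 * "address <= PM_LEVEL_SIZE()" check above makes a racing increase on
 * another CPU harmless: the late-comer simply drops its pre-allocated page
 * and reports success.
 */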
static u64 *alloc_pte(struct protection_domain *domain,
		      unsigned long address,
		      unsigned long page_size,
		      u64 **pte_page,
		      gfp_t gfp,
		      bool *updated)
{
	u64 *pte, *page;
	int level, end_lvl;

	BUG_ON(!is_power_of_2(page_size));

	while (address > PM_LEVEL_SIZE(domain->iop.mode)) {
		/*
		 * Return an error if there is no memory to update the
		 * page-table.
		 */
		if (!increase_address_space(domain, address, gfp))
			return NULL;
	}

	level   = domain->iop.mode - 1;
	pte     = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
	address = PAGE_SIZE_ALIGN(address, page_size);
	end_lvl = PAGE_SIZE_LEVEL(page_size);

	while (level > end_lvl) {
		u64 __pte, __npte;
		int pte_level;

		__pte     = *pte;
		pte_level = PM_PTE_LEVEL(__pte);

		/*
		 * If we replace a series of large PTEs, we need
		 * to tear down all of them.
		 */
		if (IOMMU_PTE_PRESENT(__pte) &&
		    pte_level == PAGE_MODE_7_LEVEL) {
			unsigned long count, i;
			u64 *lpte;

			lpte = first_pte_l7(pte, NULL, &count);

			/*
			 * Unmap the replicated PTEs that still match the
			 * original large mapping
			 */
			for (i = 0; i < count; ++i)
				cmpxchg64(&lpte[i], __pte, 0ULL);

			*updated = true;
			continue;
		}

		if (!IOMMU_PTE_PRESENT(__pte) ||
		    pte_level == PAGE_MODE_NONE) {
			page = iommu_alloc_page_node(domain->nid, gfp);
			if (!page)
				return NULL;

			__npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));

			/* pte could have been changed somewhere. */
			if (!try_cmpxchg64(pte, &__pte, __npte))
				iommu_free_page(page);
			else if (IOMMU_PTE_PRESENT(__pte))
				*updated = true;

			continue;
		}

		/* No level skipping support yet */
		if (pte_level != level)
			return NULL;

		level -= 1;

		pte = IOMMU_PTE_PAGE(__pte);

		if (pte_page && level == end_lvl)
			*pte_page = pte;

		pte = &pte[PM_LEVEL_INDEX(level, address)];
	}

	return pte;
}
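/*
 * alloc_pte() returns the PTE slot that corresponds to page_size at the
 * requested address, building intermediate levels on demand. Intermediate
 * levels are installed with try_cmpxchg64() so that concurrent mappers can
 * race safely, and *updated is set whenever a present entry was torn down or
 * replaced, telling the caller that the IOTLB must be flushed before any
 * pages collected for freeing may be released.
 */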
/*
 * This function checks if there is a PTE for a given dma address. If
 * there is one, it returns the pointer to it.
 */
static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
		      unsigned long address,
		      unsigned long *page_size)
{
	int level;
	u64 *pte;

	*page_size = 0;

	if (address > PM_LEVEL_SIZE(pgtable->mode))
		return NULL;

	level	   = pgtable->mode - 1;
	pte	   = &pgtable->root[PM_LEVEL_INDEX(level, address)];
	*page_size = PTE_LEVEL_PAGE_SIZE(level);

	while (level > 0) {
		/* Not Present */
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		/* Large PTE */
		if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL ||
		    PM_PTE_LEVEL(*pte) == PAGE_MODE_NONE)
			break;

		/* No level skipping support yet */
		if (PM_PTE_LEVEL(*pte) != level)
			return NULL;

		level -= 1;

		/* Walk to the next level */
		pte	   = IOMMU_PTE_PAGE(*pte);
		pte	   = &pte[PM_LEVEL_INDEX(level, address)];
		*page_size = PTE_LEVEL_PAGE_SIZE(level);
	}

	/*
	 * If we have a series of large PTEs, make
	 * sure to return a pointer to the first one.
	 */
	if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL)
		pte = first_pte_l7(pte, page_size, NULL);

	return pte;
}
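/*
 * free_clear_pte() atomically zaps a PTE (retrying if it changes under us)
 * and, if the old value pointed at a lower-level table, queues that sub-tree
 * on the freelist. The pages are only returned to the allocator after the
 * IOTLB has been flushed, see iommu_v1_map_pages() below.
 */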
static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
{
	u64 *pt;
	int mode;

	while (!try_cmpxchg64(pte, &pteval, 0))
		pr_warn("AMD-Vi: IOMMU pte changed since we read it\n");

	if (!IOMMU_PTE_PRESENT(pteval))
		return;

	pt   = IOMMU_PTE_PAGE(pteval);
	mode = IOMMU_PTE_MODE(pteval);

	free_sub_pt(pt, mode, freelist);
}
/*
 * Generic mapping function: it maps a physical address into a DMA
 * address space and allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int prot, gfp_t gfp, size_t *mapped)
{
	struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
	LIST_HEAD(freelist);
	bool updated = false;
	u64 __pte, *pte;
	int ret, i, count;
	size_t size = pgcount << __ffs(pgsize);
	unsigned long o_iova = iova;

	BUG_ON(!IS_ALIGNED(iova, pgsize));
	BUG_ON(!IS_ALIGNED(paddr, pgsize));

	ret = -EINVAL;
	if (!(prot & IOMMU_PROT_MASK))
		goto out;

	while (pgcount > 0) {
		count = PAGE_SIZE_PTE_COUNT(pgsize);
		pte   = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);

		ret = -ENOMEM;
		if (!pte)
			goto out;

		for (i = 0; i < count; ++i)
			free_clear_pte(&pte[i], pte[i], &freelist);

		if (!list_empty(&freelist))
			updated = true;

		if (count > 1) {
			__pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
			__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
		} else
			__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;

		if (prot & IOMMU_PROT_IR)
			__pte |= IOMMU_PTE_IR;
		if (prot & IOMMU_PROT_IW)
			__pte |= IOMMU_PTE_IW;

		for (i = 0; i < count; ++i)
			pte[i] = __pte;

		iova  += pgsize;
		paddr += pgsize;
		pgcount--;
		if (mapped)
			*mapped += pgsize;
	}

	ret = 0;

out:
	if (updated) {
		unsigned long flags;

		spin_lock_irqsave(&dom->lock, flags);
		/*
		 * Flush domain TLB(s) and wait for completion. Any Device-Table
		 * Updates and flushing already happened in
		 * increase_address_space().
		 */
		amd_iommu_domain_flush_pages(dom, o_iova, size);
		spin_unlock_irqrestore(&dom->lock, flags);
	}

	/* Everything flushed out, free pages now */
	iommu_put_pages_list(&freelist);

	return ret;
}
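/*
 * Unmapping only clears the leaf PTEs (all replicated entries of a large
 * mapping are cleared together); intermediate page-table pages are kept and
 * reused for later mappings.
 */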
static unsigned long iommu_v1_unmap_pages(struct io_pgtable_ops *ops,
					  unsigned long iova,
					  size_t pgsize, size_t pgcount,
					  struct iommu_iotlb_gather *gather)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long long unmapped;
	unsigned long unmap_size;
	u64 *pte;
	size_t size = pgcount << __ffs(pgsize);

	BUG_ON(!is_power_of_2(pgsize));

	unmapped = 0;

	while (unmapped < size) {
		pte = fetch_pte(pgtable, iova, &unmap_size);
		if (pte) {
			int i, count;

			count = PAGE_SIZE_PTE_COUNT(unmap_size);
			for (i = 0; i < count; i++)
				cmpxchg64(&pte[i], pte[i], 0ULL);
		} else {
			return unmapped;
		}

		iova = (iova & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	return unmapped;
}
static phys_addr_t iommu_v1_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long offset_mask, pte_pgsize;
	u64 *pte, __pte;

	pte = fetch_pte(pgtable, iova, &pte_pgsize);
	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	offset_mask = pte_pgsize - 1;
	__pte	    = __sme_clr(*pte & PM_ADDR_MASK);

	return (__pte & ~offset_mask) | (iova & offset_mask);
}
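/*
 * Illustrative example (not from the original source): for an IOVA covered
 * by a 2 MiB mapping, fetch_pte() reports pte_pgsize == 0x200000, so
 * offset_mask == 0x1fffff and the result is the 2 MiB-aligned physical base
 * from the PTE plus the low 21 bits of the IOVA.
 */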
static bool pte_test_and_clear_dirty(u64 *ptep, unsigned long size,
				     unsigned long flags)
{
	bool test_only = flags & IOMMU_DIRTY_NO_CLEAR;
	bool dirty = false;
	int i, count;

	/*
	 * 2.2.3.2 Host Dirty Support
	 * When a non-default page size is used, software must OR the
	 * Dirty bits in all of the replicated host PTEs used to map
	 * the page. The IOMMU does not guarantee the Dirty bits are
	 * set in all of the replicated PTEs. Any portion of the page
	 * may have been written even if the Dirty bit is set in only
	 * one of the replicated PTEs.
	 */
	count = PAGE_SIZE_PTE_COUNT(size);
	for (i = 0; i < count && test_only; i++) {
		if (test_bit(IOMMU_PTE_HD_BIT, (unsigned long *)&ptep[i])) {
			dirty = true;
			break;
		}
	}

	for (i = 0; i < count && !test_only; i++) {
		if (test_and_clear_bit(IOMMU_PTE_HD_BIT,
				       (unsigned long *)&ptep[i])) {
			dirty = true;
		}
	}

	return dirty;
}
static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops,
					 unsigned long iova, size_t size,
					 unsigned long flags,
					 struct iommu_dirty_bitmap *dirty)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long end = iova + size - 1;

	do {
		unsigned long pgsize = 0;
		u64 *ptep, pte;

		ptep = fetch_pte(pgtable, iova, &pgsize);
		if (ptep)
			pte = READ_ONCE(*ptep);
		if (!ptep || !IOMMU_PTE_PRESENT(pte)) {
			pgsize = pgsize ?: PTE_LEVEL_PAGE_SIZE(0);
			iova += pgsize;
			continue;
		}

		/*
		 * Mark the whole IOVA range as dirty even if only one of
		 * the replicated PTEs was marked dirty.
		 */
		if (pte_test_and_clear_dirty(ptep, pgsize, flags))
			iommu_dirty_bitmap_record(dirty, iova, pgsize);

		iova += pgsize;
	} while (iova < end);

	return 0;
}
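/*
 * In iommu_v1_read_and_clear_dirty(), the walk advances by whatever mapping
 * size fetch_pte() reports, falling back to the base 4 KiB step across
 * unmapped holes, so a single call can scan large, mixed-size ranges without
 * visiting every base page.
 */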
/*
 * ----------------------------------------------------
 */
static void v1_free_pgtable(struct io_pgtable *iop)
{
	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
	struct protection_domain *dom;
	LIST_HEAD(freelist);

	if (pgtable->mode == PAGE_MODE_NONE)
		return;

	dom = container_of(pgtable, struct protection_domain, iop);

	/* Page-table is not visible to IOMMU anymore, so free it */
	BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
	       pgtable->mode > PAGE_MODE_6_LEVEL);

	free_sub_pt(pgtable->root, pgtable->mode, &freelist);

	/* Update data structure */
	amd_iommu_domain_clr_pt_root(dom);

	/* Make changes visible to IOMMUs */
	amd_iommu_domain_update(dom);

	iommu_put_pages_list(&freelist);
}
static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);

	cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES;
	cfg->ias           = IOMMU_IN_ADDR_BIT_SIZE;
	cfg->oas           = IOMMU_OUT_ADDR_BIT_SIZE;
	cfg->tlb           = &v1_flush_ops;

	pgtable->iop.ops.map_pages    = iommu_v1_map_pages;
	pgtable->iop.ops.unmap_pages  = iommu_v1_unmap_pages;
	pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
	pgtable->iop.ops.read_and_clear_dirty = iommu_v1_read_and_clear_dirty;

	return &pgtable->iop;
}
struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns = {
	.alloc	= v1_alloc_pgtable,
	.free	= v1_free_pgtable,
};