// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic AMD IO page table v2 allocator.
 *
 * Copyright (C) 2022, 2023 Advanced Micro Devices, Inc.
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>

#include <asm/barrier.h>

#include "amd_iommu_types.h"
#include "amd_iommu.h"
#include "../iommu-pages.h"
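
/*
 * The v2 table follows the AMD64 long-mode page-table layout: 512 64-bit
 * entries per table page, with the PTE bits below in their x86 positions.
 */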
#define IOMMU_PAGE_PRESENT	BIT_ULL(0)	/* Is present */
#define IOMMU_PAGE_RW		BIT_ULL(1)	/* Writeable */
#define IOMMU_PAGE_USER		BIT_ULL(2)	/* Userspace addressable */
#define IOMMU_PAGE_PWT		BIT_ULL(3)	/* Page write through */
#define IOMMU_PAGE_PCD		BIT_ULL(4)	/* Page cache disabled */
#define IOMMU_PAGE_ACCESS	BIT_ULL(5)	/* Was accessed (updated by IOMMU) */
#define IOMMU_PAGE_DIRTY	BIT_ULL(6)	/* Was written to (updated by IOMMU) */
#define IOMMU_PAGE_PSE		BIT_ULL(7)	/* Page Size Extensions */
#define IOMMU_PAGE_NX		BIT_ULL(63)	/* No execute */

#define MAX_PTRS_PER_PAGE	512

#define IOMMU_PAGE_SIZE_2M	BIT_ULL(21)
#define IOMMU_PAGE_SIZE_1G	BIT_ULL(30)

static inline int get_pgtable_level(void)
{
	return amd_iommu_gpt_level;
}

static inline bool is_large_pte(u64 pte)
{
	return (pte & IOMMU_PAGE_PSE);
}

static inline u64 set_pgtable_attr(u64 *page)
{
	u64 prot;

	prot = IOMMU_PAGE_PRESENT | IOMMU_PAGE_RW | IOMMU_PAGE_USER;
	prot |= IOMMU_PAGE_ACCESS;

	return (iommu_virt_to_phys(page) | prot);
}

static inline void *get_pgtable_pte(u64 pte)
{
	return iommu_phys_to_virt(pte & PM_ADDR_MASK);
}

static u64 set_pte_attr(u64 paddr, u64 pg_size, int prot)
{
	u64 pte;

	pte = __sme_set(paddr & PM_ADDR_MASK);
	pte |= IOMMU_PAGE_PRESENT | IOMMU_PAGE_USER;
	pte |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;

	if (prot & IOMMU_PROT_IW)
		pte |= IOMMU_PAGE_RW;

	/* Large page */
	if (pg_size == IOMMU_PAGE_SIZE_1G || pg_size == IOMMU_PAGE_SIZE_2M)
		pte |= IOMMU_PAGE_PSE;

	return pte;
}

static inline u64 get_alloc_page_size(u64 size)
{
	if (size >= IOMMU_PAGE_SIZE_1G)
		return IOMMU_PAGE_SIZE_1G;

	if (size >= IOMMU_PAGE_SIZE_2M)
		return IOMMU_PAGE_SIZE_2M;

	return PAGE_SIZE;
}

static inline int page_size_to_level(u64 pg_size)
{
	if (pg_size == IOMMU_PAGE_SIZE_1G)
		return PAGE_MODE_3_LEVEL;
	if (pg_size == IOMMU_PAGE_SIZE_2M)
		return PAGE_MODE_2_LEVEL;

	return PAGE_MODE_1_LEVEL;
}
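
/*
 * Recursively free all page-table pages below @pt, then @pt itself.
 * Large and leaf PTEs are skipped since they do not reference
 * lower-level tables.
 */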
static void free_pgtable(u64 *pt, int level)
{
	u64 *p;
	int i;

	for (i = 0; i < MAX_PTRS_PER_PAGE; i++) {
		/* PTE present? */
		if (!IOMMU_PTE_PRESENT(pt[i]))
			continue;

		if (is_large_pte(pt[i]))
			continue;

		/*
		 * Free the next level. No need to look at l1 tables here since
		 * they can only contain leaf PTEs; just free them directly.
		 */
		p = get_pgtable_pte(pt[i]);
		if (level > 2)
			free_pgtable(p, level - 1);
		else
			iommu_free_page(p);
	}

	iommu_free_page(pt);
}

/* Allocate page table */
static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
			 unsigned long pg_size, gfp_t gfp, bool *updated)
{
	u64 *pte, *page;
	int level, end_level;

	level = get_pgtable_level() - 1;
	end_level = page_size_to_level(pg_size);
	pte = &pgd[PM_LEVEL_INDEX(level, iova)];
	iova = PAGE_SIZE_ALIGN(iova, PAGE_SIZE);
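
	/*
	 * Walk from the top level down to the level matching the requested
	 * page size, allocating intermediate tables as needed. An existing
	 * large PTE on the way down is unmapped and replaced by a new
	 * lower-level table; *updated is set so the caller flushes the IOTLB.
	 */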
	while (level >= end_level) {
		u64 __pte, __npte;

		__pte = *pte;

		if (IOMMU_PTE_PRESENT(__pte) && is_large_pte(__pte)) {
			/* Unmap large pte */
			cmpxchg64(pte, *pte, 0ULL);
			*updated = true;
			continue;
		}

		if (!IOMMU_PTE_PRESENT(__pte)) {
			page = iommu_alloc_page_node(nid, gfp);
			if (!page)
				return NULL;

			__npte = set_pgtable_attr(page);
			/* pte could have been changed somewhere. */
			if (!try_cmpxchg64(pte, &__pte, __npte))
				iommu_free_page(page);
			else if (IOMMU_PTE_PRESENT(__pte))
				*updated = true;

			continue;
		}

		level -= 1;
		pte = get_pgtable_pte(__pte);
		pte = &pte[PM_LEVEL_INDEX(level, iova)];
	}

	/* Tear down existing pte entries */
	if (IOMMU_PTE_PRESENT(*pte)) {
		u64 *__pte;

		*updated = true;
		__pte = get_pgtable_pte(*pte);
		cmpxchg64(pte, *pte, 0ULL);
		if (pg_size == IOMMU_PAGE_SIZE_1G)
			free_pgtable(__pte, end_level - 1);
		else if (pg_size == IOMMU_PAGE_SIZE_2M)
			iommu_free_page(__pte);
	}

	return pte;
}

/*
 * This function checks if there is a PTE for a given dma address.
 * If there is one, it returns the pointer to it.
 */
static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
		      unsigned long iova, unsigned long *page_size)
{
	u64 *pte;
	int level;

	level = get_pgtable_level() - 1;
	pte = &pgtable->pgd[PM_LEVEL_INDEX(level, iova)];
	/* Default page size is 4K */
	*page_size = PAGE_SIZE;
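
	/*
	 * Walk down the levels; stop early at a large PTE (which maps the
	 * whole 2M/1G range) or bail out if the IOVA is not mapped.
	 */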
	while (level) {
		/* Not present */
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		/* Walk to the next level */
		pte = get_pgtable_pte(*pte);
		pte = &pte[PM_LEVEL_INDEX(level - 1, iova)];

		/* Large page */
		if (is_large_pte(*pte)) {
			if (level == PAGE_MODE_3_LEVEL)
				*page_size = IOMMU_PAGE_SIZE_1G;
			else if (level == PAGE_MODE_2_LEVEL)
				*page_size = IOMMU_PAGE_SIZE_2M;
			else
				return NULL;	/* Wrongly set PSE bit in PTE */

			break;
		}

		level -= 1;
	}

	return pte;
}

static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int prot, gfp_t gfp, size_t *mapped)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
	u64 *pte;
	unsigned long map_size;
	unsigned long mapped_size = 0;
	unsigned long o_iova = iova;
	size_t size = pgcount << __ffs(pgsize);
	int ret = 0;
	bool updated = false;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize) || !pgcount)
		return -EINVAL;

	if (!(prot & IOMMU_PROT_MASK))
		return -EINVAL;
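
	/*
	 * Map the region in chunks: each iteration installs one leaf PTE of
	 * map_size bytes (the largest of 4K/2M/1G not exceeding pgsize).
	 */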
	while (mapped_size < size) {
		map_size = get_alloc_page_size(pgsize);
		pte = v2_alloc_pte(cfg->amd.nid, pgtable->pgd,
				   iova, map_size, gfp, &updated);
		if (!pte) {
			ret = -ENOMEM;
			goto out;
		}

		*pte = set_pte_attr(paddr, map_size, prot);

		iova += map_size;
		paddr += map_size;
		mapped_size += map_size;
	}

out:
	if (updated) {
		struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
		unsigned long flags;

		spin_lock_irqsave(&pdom->lock, flags);
		amd_iommu_domain_flush_pages(pdom, o_iova, size);
		spin_unlock_irqrestore(&pdom->lock, flags);
	}

	if (mapped)
		*mapped += mapped_size;

	return ret;
}
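
/*
 * Note: unmap only clears the leaf PTEs. Intermediate page-table pages are
 * kept for reuse and are only released when the whole table is torn down in
 * v2_free_pgtable().
 */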
static unsigned long iommu_v2_unmap_pages(struct io_pgtable_ops *ops,
					  unsigned long iova,
					  size_t pgsize, size_t pgcount,
					  struct iommu_iotlb_gather *gather)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
	unsigned long unmap_size;
	unsigned long unmapped = 0;
	size_t size = pgcount << __ffs(pgsize);
	u64 *pte;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;
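
	/*
	 * Clear one leaf PTE per iteration; unmap_size reflects the page size
	 * actually found at the IOVA, so large mappings are consumed in a
	 * single step.
	 */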
	while (unmapped < size) {
		pte = fetch_pte(pgtable, iova, &unmap_size);
		if (!pte)
			return unmapped;

		*pte = 0ULL;

		iova = (iova & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	return unmapped;
}

static phys_addr_t iommu_v2_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long offset_mask, pte_pgsize;
	u64 *pte, __pte;

	pte = fetch_pte(pgtable, iova, &pte_pgsize);
	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	offset_mask = pte_pgsize - 1;
	__pte = __sme_clr(*pte & PM_ADDR_MASK);

	return (__pte & ~offset_mask) | (iova & offset_mask);
}

/*
 * ----------------------------------------------------
 */
static void v2_free_pgtable(struct io_pgtable *iop)
{
	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl);

	if (!pgtable || !pgtable->pgd)
		return;

	/* Free page table */
	free_pgtable(pgtable->pgd, get_pgtable_level());
	pgtable->pgd = NULL;
}

static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
	int ias = IOMMU_IN_ADDR_BIT_SIZE;

	pgtable->pgd = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
	if (!pgtable->pgd)
		return NULL;
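
	/* A 5-level table covers a 57-bit input (IOVA) address space. */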
	if (get_pgtable_level() == PAGE_MODE_5_LEVEL)
		ias = 57;

	pgtable->pgtbl.ops.map_pages    = iommu_v2_map_pages;
	pgtable->pgtbl.ops.unmap_pages  = iommu_v2_unmap_pages;
	pgtable->pgtbl.ops.iova_to_phys = iommu_v2_iova_to_phys;

	cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
	cfg->ias = ias;
	cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE;

	return &pgtable->pgtbl;
}

struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns = {
	.alloc	= v2_alloc_pgtable,
	.free	= v2_free_pgtable,
};