drivers/iommu/amd/io_pgtable_v2.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic AMD IO page table v2 allocator.
 *
 * Copyright (C) 2022, 2023 Advanced Micro Devices, Inc.
 * Author: Suravee Suthikulpanit <[email protected]>
 * Author: Vasant Hegde <[email protected]>
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>

#include <asm/barrier.h>

#include "amd_iommu_types.h"
#include "amd_iommu.h"
#include "../iommu-pages.h"

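/*
 * The v2 table uses the same entry layout as an x86-64 long-mode CPU page
 * table, which is what lets it be shared with a process address space for
 * PASID/SVA use. The bit positions below (P=0, R/W=1, U/S=2, PWT=3, PCD=4,
 * A=5, D=6, PSE=7, NX=63) mirror the x86 PTE format exactly.
 */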
#define IOMMU_PAGE_PRESENT	BIT_ULL(0)	/* Is present */
#define IOMMU_PAGE_RW		BIT_ULL(1)	/* Writeable */
#define IOMMU_PAGE_USER		BIT_ULL(2)	/* Userspace addressable */
#define IOMMU_PAGE_PWT		BIT_ULL(3)	/* Page write through */
#define IOMMU_PAGE_PCD		BIT_ULL(4)	/* Page cache disabled */
#define IOMMU_PAGE_ACCESS	BIT_ULL(5)	/* Was accessed (updated by IOMMU) */
#define IOMMU_PAGE_DIRTY	BIT_ULL(6)	/* Was written to (updated by IOMMU) */
#define IOMMU_PAGE_PSE		BIT_ULL(7)	/* Page Size Extensions */
#define IOMMU_PAGE_NX		BIT_ULL(63)	/* No execute */

#define MAX_PTRS_PER_PAGE	512

#define IOMMU_PAGE_SIZE_2M	BIT_ULL(21)
#define IOMMU_PAGE_SIZE_1G	BIT_ULL(30)

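/*
 * amd_iommu_gpt_level is the guest page table depth the driver selected at
 * init time (4- or 5-level, depending on hardware support).
 */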
static inline int get_pgtable_level(void)
{
	return amd_iommu_gpt_level;
}

static inline bool is_large_pte(u64 pte)
{
	return (pte & IOMMU_PAGE_PSE);
}

static inline u64 set_pgtable_attr(u64 *page)
{
	u64 prot;

	prot = IOMMU_PAGE_PRESENT | IOMMU_PAGE_RW | IOMMU_PAGE_USER;
	prot |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;

	return (iommu_virt_to_phys(page) | prot);
}

static inline void *get_pgtable_pte(u64 pte)
{
	return iommu_phys_to_virt(pte & PM_ADDR_MASK);
}

static u64 set_pte_attr(u64 paddr, u64 pg_size, int prot)
{
	u64 pte;

	pte = __sme_set(paddr & PM_ADDR_MASK);
	pte |= IOMMU_PAGE_PRESENT | IOMMU_PAGE_USER;
	pte |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;

	if (prot & IOMMU_PROT_IW)
		pte |= IOMMU_PAGE_RW;

	/* Large page */
	if (pg_size == IOMMU_PAGE_SIZE_1G || pg_size == IOMMU_PAGE_SIZE_2M)
		pte |= IOMMU_PAGE_PSE;

	return pte;
}

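/*
 * Round a requested page size down to the largest leaf size the v2 table
 * supports: 1G or 2M large pages, 4K otherwise. Since the IOMMU core only
 * hands us sizes taken from pgsize_bitmap, this normally returns @size
 * itself.
 */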
static inline u64 get_alloc_page_size(u64 size)
{
	if (size >= IOMMU_PAGE_SIZE_1G)
		return IOMMU_PAGE_SIZE_1G;

	if (size >= IOMMU_PAGE_SIZE_2M)
		return IOMMU_PAGE_SIZE_2M;

	return PAGE_SIZE;
}

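/*
 * Lowest table level the allocator must descend through for a given leaf
 * size: the walk in v2_alloc_pte() stops once tables are installed down to
 * this level, leaving the returned pointer at the slot for the leaf entry.
 */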
static inline int page_size_to_level(u64 pg_size)
{
	if (pg_size == IOMMU_PAGE_SIZE_1G)
		return PAGE_MODE_3_LEVEL;
	if (pg_size == IOMMU_PAGE_SIZE_2M)
		return PAGE_MODE_2_LEVEL;

	return PAGE_MODE_1_LEVEL;
}

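/*
 * Recursively free a (sub-)tree of page tables. Large-page leaves are
 * skipped: they point at system RAM owned by the caller, not at a
 * lower-level table.
 */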
static void free_pgtable(u64 *pt, int level)
{
	u64 *p;
	int i;

	for (i = 0; i < MAX_PTRS_PER_PAGE; i++) {
		/* PTE present? */
		if (!IOMMU_PTE_PRESENT(pt[i]))
			continue;

		if (is_large_pte(pt[i]))
			continue;

		/*
		 * Free the next level. No need to look at l1 tables here since
		 * they can only contain leaf PTEs; just free them directly.
		 */
		p = get_pgtable_pte(pt[i]);
		if (level > 2)
			free_pgtable(p, level - 1);
		else
			iommu_free_page(p);
	}

	iommu_free_page(pt);
}

/*
 * Allocate the page table pages needed to map @iova at @pg_size and return
 * a pointer to the leaf PTE slot. Intermediate levels are populated
 * locklessly with cmpxchg64(); losing a race simply frees the speculatively
 * allocated page and retries. *@updated is set whenever an existing
 * translation was torn down, so the caller knows a TLB flush is required.
 */
static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
			 unsigned long pg_size, gfp_t gfp, bool *updated)
{
	u64 *pte, *page;
	int level, end_level;

	level = get_pgtable_level() - 1;
	end_level = page_size_to_level(pg_size);
	pte = &pgd[PM_LEVEL_INDEX(level, iova)];
	iova = PAGE_SIZE_ALIGN(iova, PAGE_SIZE);

	while (level >= end_level) {
		u64 __pte, __npte;

		__pte = *pte;

		if (IOMMU_PTE_PRESENT(__pte) && is_large_pte(__pte)) {
			/* Unmap large pte */
			cmpxchg64(pte, *pte, 0ULL);
			*updated = true;
			continue;
		}

		if (!IOMMU_PTE_PRESENT(__pte)) {
			page = iommu_alloc_page_node(nid, gfp);
			if (!page)
				return NULL;

			__npte = set_pgtable_attr(page);
			/* pte could have been changed somewhere. */
			if (cmpxchg64(pte, __pte, __npte) != __pte)
				iommu_free_page(page);
			else if (IOMMU_PTE_PRESENT(__pte))
				*updated = true;

			continue;
		}

		level -= 1;
		pte = get_pgtable_pte(__pte);
		pte = &pte[PM_LEVEL_INDEX(level, iova)];
	}

	/* Tear down existing pte entries */
	if (IOMMU_PTE_PRESENT(*pte)) {
		u64 *__pte;

		*updated = true;
		__pte = get_pgtable_pte(*pte);
		cmpxchg64(pte, *pte, 0ULL);
		if (pg_size == IOMMU_PAGE_SIZE_1G)
			free_pgtable(__pte, end_level - 1);
		else if (pg_size == IOMMU_PAGE_SIZE_2M)
			iommu_free_page(__pte);
	}

	return pte;
}

/*
 * Walk the page table for a given IO virtual address. Returns a pointer to
 * the leaf PTE and stores the page size that PTE covers in *@page_size, or
 * NULL if an intermediate level is not present. Callers must still check
 * the present bit of the returned leaf.
 */
static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
		      unsigned long iova, unsigned long *page_size)
{
	u64 *pte;
	int level;

	level = get_pgtable_level() - 1;
	pte = &pgtable->pgd[PM_LEVEL_INDEX(level, iova)];
	/* Default page size is 4K */
	*page_size = PAGE_SIZE;

	while (level) {
		/* Not present */
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		/* Walk to the next level */
		pte = get_pgtable_pte(*pte);
		pte = &pte[PM_LEVEL_INDEX(level - 1, iova)];

		/* Large page */
		if (is_large_pte(*pte)) {
			if (level == PAGE_MODE_3_LEVEL)
				*page_size = IOMMU_PAGE_SIZE_1G;
			else if (level == PAGE_MODE_2_LEVEL)
				*page_size = IOMMU_PAGE_SIZE_2M;
			else
				return NULL;	/* Wrongly set PSE bit in PTE */

			break;
		}

		level -= 1;
	}

	return pte;
}

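/*
 * Map @pgcount pages of @pgsize each, installing one leaf PTE per loop
 * iteration. Existing translations in the way are torn down by
 * v2_alloc_pte(), which sets @updated so that the whole range is flushed
 * once at the end rather than per PTE.
 */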
static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int prot, gfp_t gfp, size_t *mapped)
{
	struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
	struct io_pgtable_cfg *cfg = &pdom->iop.iop.cfg;
	u64 *pte;
	unsigned long map_size;
	unsigned long mapped_size = 0;
	unsigned long o_iova = iova;
	size_t size = pgcount << __ffs(pgsize);
	int ret = 0;
	bool updated = false;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize) || !pgcount)
		return -EINVAL;

	if (!(prot & IOMMU_PROT_MASK))
		return -EINVAL;

	while (mapped_size < size) {
		map_size = get_alloc_page_size(pgsize);
		pte = v2_alloc_pte(pdom->nid, pdom->iop.pgd,
				   iova, map_size, gfp, &updated);
		if (!pte) {
			ret = -EINVAL;
			goto out;
		}

		*pte = set_pte_attr(paddr, map_size, prot);

		iova += map_size;
		paddr += map_size;
		mapped_size += map_size;
	}

out:
	if (updated)
		amd_iommu_domain_flush_pages(pdom, o_iova, size);

	if (mapped)
		*mapped += mapped_size;

	return ret;
}

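/*
 * Unmap up to @pgcount pages by clearing leaf PTEs. Returns the number of
 * bytes actually unmapped, which may be short if a hole is reached. TLB
 * invalidation is deferred to the caller's IOTLB sync; @gather is unused
 * here.
 */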
static unsigned long iommu_v2_unmap_pages(struct io_pgtable_ops *ops,
					  unsigned long iova,
					  size_t pgsize, size_t pgcount,
					  struct iommu_iotlb_gather *gather)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &pgtable->iop.cfg;
	unsigned long unmap_size;
	unsigned long unmapped = 0;
	size_t size = pgcount << __ffs(pgsize);
	u64 *pte;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	while (unmapped < size) {
		pte = fetch_pte(pgtable, iova, &unmap_size);
		if (!pte)
			return unmapped;

		*pte = 0ULL;

		iova = (iova & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	return unmapped;
}

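/*
 * Translate an IOVA to a physical address by walking the table and
 * combining the page frame from the leaf PTE with the offset bits of the
 * IOVA that fall inside the page.
 */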
static phys_addr_t iommu_v2_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long offset_mask, pte_pgsize;
	u64 *pte, __pte;

	pte = fetch_pte(pgtable, iova, &pte_pgsize);
	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	offset_mask = pte_pgsize - 1;
	__pte = __sme_clr(*pte & PM_ADDR_MASK);

	return (__pte & ~offset_mask) | (iova & offset_mask);
}

/*
 * The io-pgtable core expects flush callbacks, but for the v2 table the
 * AMD driver issues its own invalidations (see the explicit
 * amd_iommu_domain_flush_pages() call above), so these are no-ops.
 */
static void v2_tlb_flush_all(void *cookie)
{
}

static void v2_tlb_flush_walk(unsigned long iova, size_t size,
			      size_t granule, void *cookie)
{
}

static void v2_tlb_add_page(struct iommu_iotlb_gather *gather,
			    unsigned long iova, size_t granule,
			    void *cookie)
{
}

static const struct iommu_flush_ops v2_flush_ops = {
	.tlb_flush_all	= v2_tlb_flush_all,
	.tlb_flush_walk	= v2_tlb_flush_walk,
	.tlb_add_page	= v2_tlb_add_page,
};

static void v2_free_pgtable(struct io_pgtable *iop)
{
	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);

	if (!pgtable || !pgtable->pgd)
		return;

	/* Free page table */
	free_pgtable(pgtable->pgd, get_pgtable_level());
	pgtable->pgd = NULL;
}

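/*
 * Set up an amd_io_pgtable in the v2 format: allocate the root table on
 * the domain's NUMA node and advertise the supported page sizes and
 * address widths to the io-pgtable core. With 5-level tables the input
 * address space grows to 57 bits, matching x86 LA57.
 */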
static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
	struct protection_domain *pdom = (struct protection_domain *)cookie;
	int ias = IOMMU_IN_ADDR_BIT_SIZE;

	pgtable->pgd = iommu_alloc_page_node(pdom->nid, GFP_ATOMIC);
	if (!pgtable->pgd)
		return NULL;

	if (get_pgtable_level() == PAGE_MODE_5_LEVEL)
		ias = 57;

	pgtable->iop.ops.map_pages    = iommu_v2_map_pages;
	pgtable->iop.ops.unmap_pages  = iommu_v2_unmap_pages;
	pgtable->iop.ops.iova_to_phys = iommu_v2_iova_to_phys;

	cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
	cfg->ias           = ias;
	cfg->oas           = IOMMU_OUT_ADDR_BIT_SIZE;
	cfg->tlb           = &v2_flush_ops;

	return &pgtable->iop;
}

struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns = {
	.alloc	= v2_alloc_pgtable,
	.free	= v2_free_pgtable,
};