/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 */
#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/barrier.h>

#include "io-pgtable.h"
#define ARM_LPAE_MAX_ADDR_BITS		48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
 */
#define ARM_LPAE_START_LVL(d)	(ARM_LPAE_MAX_LEVELS - (d)->levels)
/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)
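/*
 * Worked example (4K granule): pg_shift = 12 and bits_per_level = 9,
 * so a 4-level walk starting at level 0 yields shifts of 39, 30, 21
 * and 12 for levels 0-3, i.e. the usual AArch64 4K translation layout.
 */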
#define ARM_LPAE_GRANULE(d)	(1UL << (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
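/*
 * Example (4K granule, 4 levels): iova 0x40201000 decomposes into
 * index 0 at level 0 and index 1 at each of levels 1, 2 and 3, since
 * every level below the pgd consumes 9 bits of the address.
 */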
/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1 << (ilog2(sizeof(arm_lpae_iopte)) +				\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
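/*
 * For a 4K granule this evaluates to 4K at level 3, 2M at level 2 and
 * 1G at level 1, matching the architectural block sizes that
 * arm_lpae_restrict_pgsizes() advertises below.
 */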
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
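/*
 * With the three attribute indices above, arm_64_lpae_alloc_pgtable_s1()
 * below composes MAIR0 as 0x04ff44: Normal Non-Cacheable in slot 0,
 * Normal Write-Back RWA in slot 1 and Device memory in slot 2.
 */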
/* IOPTE accessors */
#define iopte_deref(pte,d)					\
	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
	& ~(ARM_LPAE_GRANULE(d) - 1ULL)))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)					\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)					\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
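/*
 * e.g. with a 4K granule, pfn_to_iopte() places the output address at
 * PTE bits [47:12] and iopte_to_pfn() recovers it, with the shift and
 * the 48-bit mask stripping the attribute bits on either side.
 */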
struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static bool selftest_running = false;
static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}
static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	dma_addr_t dma;
	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);

	if (!pages)
		return NULL;

	if (!selftest_running) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	free_pages_exact(pages, size);
	return NULL;
}
static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!selftest_running)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages_exact(pages, size);
}
static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!selftest_running)
		dma_sync_single_for_device(cfg->iommu_dev,
					   __arm_lpae_dma_addr(ptep),
					   sizeof(pte), DMA_TO_DEVICE);
}
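/*
 * Note: the IOMMU's table walker is a DMA master in its own right, so
 * a CPU store to a PTE is not necessarily visible to it; when the
 * tables don't live in coherent memory, every update needs the
 * dma_sync above before the walker may observe it.
 */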
static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep);
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	if (iopte_leaf(*ptep, lvl)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	__arm_lpae_set_pte(ptep, pte, cfg);
	return 0;
}
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data),
					       GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		__arm_lpae_set_pte(ptep, pte, cfg);
	} else {
		cptep = iopte_deref(pte, data);
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
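/*
 * For example, with a 4K granule, mapping a 2M-aligned/2M-sized region
 * recurses down from the start level until lvl == 2, where size equals
 * ARM_LPAE_BLOCK_SIZE(2, data) and a single block entry is installed
 * instead of descending to a last-level table.
 */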
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
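/*
 * Example: a stage-1 read-only cacheable mapping (IOMMU_READ |
 * IOMMU_CACHE) yields AP_UNPRIV | AP_RDONLY | nG with ATTRINDX
 * pointing at the write-back MAIR slot; adding IOMMU_WRITE simply
 * drops AP_RDONLY.
 */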
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}
static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}
static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	arm_lpae_iopte table = 0;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		/* Unmap! */
		if (blk_start == iova)
			continue;

		/* __arm_lpae_map expects a pointer to the start of the table */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	__arm_lpae_set_pte(ptep, table, cfg);
	iova &= ~(blk_size - 1);
	cfg->tlb->tlb_add_flush(iova, blk_size, blk_size, true, data->iop.cookie);
	return size;
}
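/*
 * Example: unmapping one 4K page from the middle of a live 2M block
 * remaps the other 511 pages into a freshly allocated next-level table
 * via the loop above, then swaps the block entry for the new table
 * pointer and invalidates the old block's TLB entry.
 */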
static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
	void *cookie = data->iop.cookie;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		__arm_lpae_set_pte(ptep, 0, &data->iop.cfg);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			tlb->tlb_add_flush(iova, size, ARM_LPAE_GRANULE(data),
					   false, cookie);
			tlb->tlb_sync(cookie);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			tlb->tlb_add_flush(iova, size, size, true, cookie);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						blk_size);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}
static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t size)
{
	size_t unmapped;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable *iop = &data->iop;
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
	if (unmapped)
		iop->cfg.tlb->tlb_sync(iop->cookie);

	return unmapped;
}
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte,lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_GRANULE(data) - 1);
	return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
}
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}
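/*
 * e.g. on a CPU with 4K pages, an incoming bitmap of SZ_4K | SZ_64K |
 * SZ_2M selects the 4K granule and is trimmed to SZ_4K | SZ_2M.
 */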
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
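/*
 * Example geometry: ias = 48 with a 4K granule gives pg_shift = 12,
 * bits_per_level = 9, va_bits = 36, levels = 4, and a one-page
 * (512-entry) pgd.
 */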
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
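/*
 * SL0 example: for a 4K-granule walk starting at level 1, sl is bumped
 * to 2 in the switch above and (~2 & 0x3) = 1, the architectural SL0
 * encoding for a stage-2 walk that begins at level 1.
 */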
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};
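/*
 * These init_fns are consumed by io-pgtable.c; an IOMMU driver never
 * calls the allocators above directly. A minimal usage sketch (the cfg
 * field names and the alloc/free helpers are real, the driver-side
 * values below are made up for illustration):
 *
 *	struct io_pgtable_cfg cfg = {
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 48,
 *		.oas		= 48,
 *		.tlb		= &my_tlb_ops,
 *		.iommu_dev	= dev,
 *	};
 *	struct io_pgtable_ops *ops;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
 *	if (ops)
 *		ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	free_io_pgtable_ops(ops);
 */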
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};
static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})
static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}
static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);

#endif /* CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST */