// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ARM64_KVM_PGTABLE_H__
#define __ARM64_KVM_PGTABLE_H__

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <linux/types.h>

#define KVM_PGTABLE_FIRST_LEVEL		-1
#define KVM_PGTABLE_LAST_LEVEL		3

/*
 * The largest supported block sizes for KVM (no 52-bit PA support):
 *
 *  - 4K (level 1):	1GB
 *  - 16K (level 2):	32MB
 *  - 64K (level 2):	512MB
 */
#ifdef CONFIG_ARM64_4K_PAGES
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	1
#else
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	2
#endif

#define kvm_lpa2_is_enabled()		system_supports_lpa2()

static inline u64 kvm_get_parange_max(void)
{
	if (kvm_lpa2_is_enabled() ||
	   (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && PAGE_SHIFT == 16))
		return ID_AA64MMFR0_EL1_PARANGE_52;
	else
		return ID_AA64MMFR0_EL1_PARANGE_48;
}

static inline u64 kvm_get_parange(u64 mmfr0)
{
	u64 parange_max = kvm_get_parange_max();
	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_EL1_PARANGE_SHIFT);

	if (parange > parange_max)
		parange = parange_max;

	return parange;
}
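
/*
 * Worked example (illustrative): a CPU may report a 52-bit PARange
 * (ID_AA64MMFR0_EL1_PARANGE_52, 0b0110) in ID_AA64MMFR0_EL1, but on a
 * non-LPA2 4K or 16K page kernel kvm_get_parange_max() returns
 * ID_AA64MMFR0_EL1_PARANGE_48 (0b0101), so kvm_get_parange() clamps the
 * reported value down to the 48-bit encoding.
 */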

typedef u64 kvm_pte_t;

#define KVM_PTE_VALID			BIT(0)

#define KVM_PTE_ADDR_MASK		GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48		GENMASK(15, 12)
#define KVM_PTE_ADDR_MASK_LPA2		GENMASK(49, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_50_LPA2		GENMASK(9, 8)

#define KVM_PHYS_INVALID		(-1ULL)

#define KVM_PTE_LEAF_ATTR_LO		GENMASK(11, 2)

#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX	GENMASK(4, 2)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP	GENMASK(7, 6)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO		\
	({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 2 : 3; })
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW		\
	({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 0 : 1; })
#define KVM_PTE_LEAF_ATTR_LO_S1_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S1_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR	GENMASK(5, 2)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R	BIT(6)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W	BIT(7)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S2_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_HI		GENMASK(63, 50)

#define KVM_PTE_LEAF_ATTR_HI_SW		GENMASK(58, 55)

#define KVM_PTE_LEAF_ATTR_HI_S1_XN	BIT(54)

#define KVM_PTE_LEAF_ATTR_HI_S2_XN	BIT(54)

#define KVM_PTE_LEAF_ATTR_HI_S1_GP	BIT(50)

#define KVM_PTE_LEAF_ATTR_S2_PERMS	(KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
					 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
					 KVM_PTE_LEAF_ATTR_HI_S2_XN)

#define KVM_INVALID_PTE_OWNER_MASK	GENMASK(9, 2)
#define KVM_MAX_OWNER_ID		1

/*
 * Used to indicate a pte for which a 'break-before-make' sequence is in
 * progress.
 */
#define KVM_INVALID_PTE_LOCKED		BIT(10)

static inline bool kvm_pte_valid(kvm_pte_t pte)
{
	return pte & KVM_PTE_VALID;
}

static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
{
	u64 pa;

	if (kvm_lpa2_is_enabled()) {
		pa = pte & KVM_PTE_ADDR_MASK_LPA2;
		pa |= FIELD_GET(KVM_PTE_ADDR_51_50_LPA2, pte) << 50;
	} else {
		pa = pte & KVM_PTE_ADDR_MASK;
		if (PAGE_SHIFT == 16)
			pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
	}

	return pa;
}

static inline kvm_pte_t kvm_phys_to_pte(u64 pa)
{
	kvm_pte_t pte;

	if (kvm_lpa2_is_enabled()) {
		pte = pa & KVM_PTE_ADDR_MASK_LPA2;
		pa &= GENMASK(51, 50);
		pte |= FIELD_PREP(KVM_PTE_ADDR_51_50_LPA2, pa >> 50);
	} else {
		pte = pa & KVM_PTE_ADDR_MASK;
		if (PAGE_SHIFT == 16) {
			pa &= GENMASK(51, 48);
			pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
		}
	}

	return pte;
}

static inline kvm_pfn_t kvm_pte_to_pfn(kvm_pte_t pte)
{
	return __phys_to_pfn(kvm_pte_to_phys(pte));
}

static inline u64 kvm_granule_shift(s8 level)
{
	/* Assumes KVM_PGTABLE_LAST_LEVEL is 3 */
	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}

static inline u64 kvm_granule_size(s8 level)
{
	return BIT(kvm_granule_shift(level));
}

static inline bool kvm_level_supports_block_mapping(s8 level)
{
	return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
}

static inline u32 kvm_supported_block_sizes(void)
{
	s8 level = KVM_PGTABLE_MIN_BLOCK_LEVEL;
	u32 r = 0;

	for (; level <= KVM_PGTABLE_LAST_LEVEL; level++)
		r |= BIT(kvm_granule_shift(level));

	return r;
}

static inline bool kvm_is_block_size_supported(u64 size)
{
	bool is_power_of_two = IS_ALIGNED(size, size);

	return is_power_of_two && (size & kvm_supported_block_sizes());
}
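
/*
 * Worked example (illustrative): with 4K pages,
 * KVM_PGTABLE_MIN_BLOCK_LEVEL is 1, so kvm_supported_block_sizes()
 * returns BIT(30) | BIT(21) | BIT(12) (1GB, 2MB and 4K), and
 * kvm_is_block_size_supported(SZ_2M) is true while
 * kvm_is_block_size_supported(SZ_16M) is false.
 */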

/**
 * struct kvm_pgtable_mm_ops - Memory management callbacks.
 * @zalloc_page:		Allocate a single zeroed memory page.
 *				The @arg parameter can be used by the walker
 *				to pass a memcache. The initial refcount of
 *				the page is 1.
 * @zalloc_pages_exact:		Allocate an exact number of zeroed memory pages.
 *				The @size parameter is in bytes, and is rounded
 *				up to the next page boundary. The resulting
 *				allocation is physically contiguous.
 * @free_pages_exact:		Free an exact number of memory pages previously
 *				allocated by zalloc_pages_exact.
 * @free_unlinked_table:	Free an unlinked paging structure by unlinking and
 *				dropping references.
 * @get_page:			Increment the refcount on a page.
 * @put_page:			Decrement the refcount on a page. When the
 *				refcount reaches 0 the page is automatically
 *				freed.
 * @page_count:			Return the refcount of a page.
 * @phys_to_virt:		Convert a physical address into a virtual
 *				address mapped in the current context.
 * @virt_to_phys:		Convert a virtual address mapped in the current
 *				context into a physical address.
 * @dcache_clean_inval_poc:	Clean and invalidate the data cache to the PoC
 *				for the specified memory address range.
 * @icache_inval_pou:		Invalidate the instruction cache to the PoU
 *				for the specified memory address range.
 */
struct kvm_pgtable_mm_ops {
	void*		(*zalloc_page)(void *arg);
	void*		(*zalloc_pages_exact)(size_t size);
	void		(*free_pages_exact)(void *addr, size_t size);
	void		(*free_unlinked_table)(void *addr, s8 level);
	void		(*get_page)(void *addr);
	void		(*put_page)(void *addr);
	int		(*page_count)(void *addr);
	void*		(*phys_to_virt)(phys_addr_t phys);
	phys_addr_t	(*virt_to_phys)(void *addr);
	void		(*dcache_clean_inval_poc)(void *addr, size_t size);
	void		(*icache_inval_pou)(void *addr, size_t size);
};
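
/*
 * Minimal illustrative sketch of an mm_ops implementation for a
 * kernel-context page-table (the ex_* names are hypothetical, not part
 * of this API; in-tree users supply their own callbacks):
 *
 *	static void *ex_zalloc_page(void *arg)
 *	{
 *		return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 *	}
 *
 *	static void *ex_phys_to_virt(phys_addr_t phys)
 *	{
 *		return __va(phys);
 *	}
 *
 *	static phys_addr_t ex_virt_to_phys(void *addr)
 *	{
 *		return __pa(addr);
 *	}
 *
 *	static struct kvm_pgtable_mm_ops ex_mm_ops = {
 *		.zalloc_page	= ex_zalloc_page,
 *		.phys_to_virt	= ex_phys_to_virt,
 *		.virt_to_phys	= ex_virt_to_phys,
 *	};
 */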

/**
 * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
 * @KVM_PGTABLE_S2_NOFWB:	Don't enforce Normal-WB even if the CPUs have
 *				ARM64_HAS_STAGE2_FWB.
 * @KVM_PGTABLE_S2_IDMAP:	Only use identity mappings.
 */
enum kvm_pgtable_stage2_flags {
	KVM_PGTABLE_S2_NOFWB	= BIT(0),
	KVM_PGTABLE_S2_IDMAP	= BIT(1),
};

/**
 * enum kvm_pgtable_prot - Page-table permissions and attributes.
 * @KVM_PGTABLE_PROT_X:		Execute permission.
 * @KVM_PGTABLE_PROT_W:		Write permission.
 * @KVM_PGTABLE_PROT_R:		Read permission.
 * @KVM_PGTABLE_PROT_DEVICE:	Device attributes.
 * @KVM_PGTABLE_PROT_NORMAL_NC:	Normal noncacheable attributes.
 * @KVM_PGTABLE_PROT_SW0:	Software bit 0.
 * @KVM_PGTABLE_PROT_SW1:	Software bit 1.
 * @KVM_PGTABLE_PROT_SW2:	Software bit 2.
 * @KVM_PGTABLE_PROT_SW3:	Software bit 3.
 */
enum kvm_pgtable_prot {
	KVM_PGTABLE_PROT_X			= BIT(0),
	KVM_PGTABLE_PROT_W			= BIT(1),
	KVM_PGTABLE_PROT_R			= BIT(2),

	KVM_PGTABLE_PROT_DEVICE			= BIT(3),
	KVM_PGTABLE_PROT_NORMAL_NC		= BIT(4),

	KVM_PGTABLE_PROT_SW0			= BIT(55),
	KVM_PGTABLE_PROT_SW1			= BIT(56),
	KVM_PGTABLE_PROT_SW2			= BIT(57),
	KVM_PGTABLE_PROT_SW3			= BIT(58),
};

#define KVM_PGTABLE_PROT_RW	(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
#define KVM_PGTABLE_PROT_RWX	(KVM_PGTABLE_PROT_RW | KVM_PGTABLE_PROT_X)

#define PKVM_HOST_MEM_PROT	KVM_PGTABLE_PROT_RWX
#define PKVM_HOST_MMIO_PROT	KVM_PGTABLE_PROT_RW

#define PAGE_HYP		KVM_PGTABLE_PROT_RW
#define PAGE_HYP_EXEC		(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
#define PAGE_HYP_RO		(KVM_PGTABLE_PROT_R)
#define PAGE_HYP_DEVICE		(PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)

typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
					   enum kvm_pgtable_prot prot);

/**
 * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
 * @KVM_PGTABLE_WALK_LEAF:		Visit leaf entries, including invalid
 *					entries.
 * @KVM_PGTABLE_WALK_TABLE_PRE:		Visit table entries before their
 *					children.
 * @KVM_PGTABLE_WALK_TABLE_POST:	Visit table entries after their
 *					children.
 * @KVM_PGTABLE_WALK_SHARED:		Indicates the page-tables may be shared
 *					with other software walkers.
 * @KVM_PGTABLE_WALK_HANDLE_FAULT:	Indicates the page-table walk was
 *					invoked from a fault handler.
 * @KVM_PGTABLE_WALK_SKIP_BBM_TLBI:	Visit and update table entries
 *					without break-before-make's
 *					TLB invalidation.
 * @KVM_PGTABLE_WALK_SKIP_CMO:		Visit and update table entries
 *					without the cache maintenance
 *					operations normally required.
 */
enum kvm_pgtable_walk_flags {
	KVM_PGTABLE_WALK_LEAF			= BIT(0),
	KVM_PGTABLE_WALK_TABLE_PRE		= BIT(1),
	KVM_PGTABLE_WALK_TABLE_POST		= BIT(2),
	KVM_PGTABLE_WALK_SHARED			= BIT(3),
	KVM_PGTABLE_WALK_HANDLE_FAULT		= BIT(4),
	KVM_PGTABLE_WALK_SKIP_BBM_TLBI		= BIT(5),
	KVM_PGTABLE_WALK_SKIP_CMO		= BIT(6),
};

struct kvm_pgtable_visit_ctx {
	kvm_pte_t				*ptep;
	kvm_pte_t				old;
	void					*arg;
	struct kvm_pgtable_mm_ops		*mm_ops;
	u64					start;
	u64					addr;
	u64					end;
	s8					level;
	enum kvm_pgtable_walk_flags		flags;
};

typedef int (*kvm_pgtable_visitor_fn_t)(const struct kvm_pgtable_visit_ctx *ctx,
					enum kvm_pgtable_walk_flags visit);

static inline bool kvm_pgtable_walk_shared(const struct kvm_pgtable_visit_ctx *ctx)
{
	return ctx->flags & KVM_PGTABLE_WALK_SHARED;
}

/**
 * struct kvm_pgtable_walker - Hook into a page-table walk.
 * @cb:		Callback function to invoke during the walk.
 * @arg:	Argument passed to the callback function.
 * @flags:	Bitwise-OR of flags to identify the entry types on which to
 *		invoke the callback function.
 */
struct kvm_pgtable_walker {
	const kvm_pgtable_visitor_fn_t		cb;
	void * const				arg;
	const enum kvm_pgtable_walk_flags	flags;
};
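
/*
 * Illustrative walker callback (hypothetical, not part of this header):
 * count the valid leaf entries visited during a walk, with the running
 * count passed through @arg.
 *
 *	static int count_leaf_cb(const struct kvm_pgtable_visit_ctx *ctx,
 *				 enum kvm_pgtable_walk_flags visit)
 *	{
 *		u64 *count = ctx->arg;
 *
 *		if (kvm_pte_valid(ctx->old))
 *			(*count)++;
 *
 *		return 0;
 *	}
 */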

/*
 * RCU cannot be used in a non-kernel context such as the hyp. As such, page
 * table walkers used in hyp do not call into RCU and instead use other
 * synchronization mechanisms (such as a spinlock).
 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)

typedef kvm_pte_t *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return pteref;
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	/*
	 * Due to the lack of RCU (or a similar protection scheme), only
	 * non-shared table walkers are allowed in the hypervisor.
	 */
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		return -EPERM;

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker) {}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return true;
}

#else

typedef kvm_pte_t __rcu *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_lock();

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_unlock();
}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return rcu_read_lock_held();
}

#endif

/**
 * struct kvm_pgtable - KVM page-table.
 * @ia_bits:		Maximum input address size, in bits.
 * @start_level:	Level at which the page-table walk starts.
 * @pgd:		Pointer to the first top-level entry of the page-table.
 * @mm_ops:		Memory management callbacks.
 * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
 * @flags:		Stage-2 page-table flags.
 * @force_pte_cb:	Function that returns true if page level mappings must
 *			be used instead of block mappings.
 */
struct kvm_pgtable {
	union {
		struct rb_root					pkvm_mappings;
		struct {
			u32					ia_bits;
			s8					start_level;
			kvm_pteref_t				pgd;
			struct kvm_pgtable_mm_ops		*mm_ops;

			/* Stage-2 only */
			enum kvm_pgtable_stage2_flags		flags;
			kvm_pgtable_force_pte_cb_t		force_pte_cb;
		};
	};
	struct kvm_s2_mmu					*mmu;
};

/**
 * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @va_bits:	Maximum virtual address bits.
 * @mm_ops:	Memory management callbacks.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops);

/**
 * kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_hyp_map() - Install a mapping in a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable. Attempts to install a new mapping
 * for a virtual address that is already mapped will be rejected with an
 * error and a WARN().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot);
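
/*
 * Illustrative sketch (hypothetical variables; not part of this API):
 * map one page of normal memory into the hypervisor as read-write.
 *
 *	int ret = kvm_pgtable_hyp_map(pgt, hyp_va, PAGE_SIZE, phys, PAGE_HYP);
 *	if (ret)
 *		return ret;
 */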

/**
 * kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. The unmapping operation will stop early if it encounters either an
 * invalid page-table entry or a valid block mapping which maps beyond the range
 * being unmapped.
 *
 * Return: Number of bytes unmapped, which may be 0.
 */
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_get_vtcr() - Helper to construct VTCR_EL2
 * @mmfr0:	Sanitised value of SYS_ID_AA64MMFR0_EL1 register.
 * @mmfr1:	Sanitised value of SYS_ID_AA64MMFR1_EL1 register.
 * @phys_shift:	Value to set in VTCR_EL2.T0SZ.
 *
 * The VTCR value is common across all the physical CPUs on the system.
 * We use system wide sanitised values to fill in different fields,
 * except for Hardware Management of Access Flags. The HA Flag is set
 * unconditionally on all CPUs, as it is safe to run with or without
 * the feature and the bit is RES0 on CPUs that don't support it.
 *
 * Return: VTCR_EL2 value
 */
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);

/**
 * kvm_pgtable_stage2_pgd_size() - Helper to compute size of a stage-2 PGD
 * @vtcr:	Content of the VTCR register.
 *
 * Return: the size (in bytes) of the stage-2 PGD
 */
size_t kvm_pgtable_stage2_pgd_size(u64 vtcr);

/**
 * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @mmu:	S2 MMU context for this S2 translation.
 * @mm_ops:	Memory management callbacks.
 * @flags:	Stage-2 configuration flags.
 * @force_pte_cb: Function that returns true if page level mappings must
 *		be used instead of block mappings.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			      struct kvm_pgtable_mm_ops *mm_ops,
			      enum kvm_pgtable_stage2_flags flags,
			      kvm_pgtable_force_pte_cb_t force_pte_cb);

static inline int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
					  struct kvm_pgtable_mm_ops *mm_ops)
{
	return __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL);
}

/**
 * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
 * @mm_ops:	Memory management callbacks.
 * @pgtable:	Unlinked stage-2 paging structure to be freed.
 * @level:	Level of the stage-2 paging structure to be freed.
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior to
 * freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level);

/**
 * kvm_pgtable_stage2_create_unlinked() - Create an unlinked stage-2 paging structure.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @phys:	Physical address of the memory to map.
 * @level:	Starting level of the stage-2 paging structure to be created.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @force_pte:	Force mappings to PAGE_SIZE granularity.
 *
 * Returns an unlinked page-table tree. This new page-table tree is
 * not reachable (i.e., it is unlinked) from the root pgd and it's
 * therefore unreachable by the hardware page-table walker. No TLB
 * invalidation or CMOs are performed.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Return: The fully populated (unlinked) stage-2 paging structure, or
 * an ERR_PTR(error) on failure.
 */
kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
					      u64 phys, s8 level,
					      enum kvm_pgtable_prot prot,
					      void *mc, bool force_pte);

/**
 * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @flags:	Flags to control the page-table walk (e.g. a shared walk)
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Note that the update of a valid leaf PTE in this function will be aborted
 * if it's trying to recreate the exact same mapping or only change the access
 * permissions. Instead, the vCPU will exit one more time from the guest if
 * still needed and then go through the path of relaxing permissions.
 *
 * Note that this function will both coalesce existing table entries and split
 * existing block mappings, relying on page-faults to fault back areas outside
 * of the new mapping lazily.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc, enum kvm_pgtable_walk_flags flags);
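
/*
 * Illustrative sketch (hypothetical variables): install one page at a
 * faulting IPA, roughly as a stage-2 fault handler might, with a shared
 * walk so that concurrent software walkers are tolerated.
 *
 *	ret = kvm_pgtable_stage2_map(pgt, fault_ipa, PAGE_SIZE,
 *				     pfn << PAGE_SHIFT,
 *				     KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W,
 *				     memcache, KVM_PGTABLE_WALK_SHARED);
 */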

/**
 * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
 *				    track ownership.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Base intermediate physical address to annotate.
 * @size:	Size of the annotated range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @owner_id:	Unique identifier for the owner of the page.
 *
 * By default, all page-tables are owned by identifier 0. This function can be
 * used to mark portions of the IPA space as owned by other entities. When a
 * stage 2 is used with identity-mappings, these annotations allow the
 * page-table data structure to be used as a simple rmap.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
				 void *mc, u8 owner_id);

/**
 * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. Unmapping a cacheable page will ensure that it is clean to the PoC if
 * FWB is not supported by the CPU.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
 *				    without TLB invalidation.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to write-protect.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the hardware.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @flags:	Flags to control the page-table walk (e.g. a shared walk)
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * set the access flag in that entry.
 */
void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
				enum kvm_pgtable_walk_flags flags);

/**
 * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
 *					   flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @size:	Size of the address range to visit.
 * @mkold:	True if the access flag should be cleared.
 *
 * The offset of @addr within a page is ignored.
 *
 * Tests and conditionally clears the access flag for every valid, leaf
 * page-table entry used to translate the range [@addr, @addr + @size).
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the hardware.
 *
 * Return: True if any of the visited PTEs had the access flag set.
 */
bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
					 u64 size, bool mkold);
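
/*
 * Illustrative sketch (hypothetical variables): age a page by clearing
 * its access flag and reporting whether it was young.
 *
 *	bool young = kvm_pgtable_stage2_test_clear_young(pgt, ipa,
 *							 PAGE_SIZE, true);
 */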

/**
 * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
 *				      page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @prot:	Additional permissions to grant for the mapping.
 * @flags:	Flags to control the page-table walk (e.g. a shared walk)
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * relax the permissions in that entry according to the read, write and
 * execute permissions specified by @prot. No permissions are removed, and
 * TLB invalidation is performed after updating the entry. Software bits cannot
 * be set or cleared using kvm_pgtable_stage2_relax_perms().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot,
				   enum kvm_pgtable_walk_flags flags);

/**
 * kvm_pgtable_stage2_flush() - Clean and invalidate data cache to Point
 *				of Coherency for guest stage-2 address
 *				range.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to flush.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_split() - Split a range of huge pages into leaf PTEs pointing
 *				to PAGE_SIZE guest pages.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to split.
 * @size:	Size of the range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 *
 * The function tries to split any level 1 or 2 entry that overlaps
 * with the input range (given by @addr and @size).
 *
 * Return: 0 on success, negative error code on failure. Note that
 * kvm_pgtable_stage2_split() is best effort: it tries to break as many
 * blocks in the input range as the capacity of @mc allows.
 */
int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
			     struct kvm_mmu_memory_cache *mc);

/**
 * kvm_pgtable_walk() - Walk a page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init().
 * @addr:	Input address for the start of the walk.
 * @size:	Size of the range to walk.
 * @walker:	Walker callback description.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address range specified, visiting entries according to the walker flags.
 * Invalid entries are treated as leaf entries. The visited page table entry is
 * reloaded after invoking the walker callback, allowing the walker to descend
 * into a newly installed table.
 *
 * Returning a negative error code from the walker callback function will
 * terminate the walk immediately with the same error code.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker);
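
/*
 * Illustrative sketch, pairing kvm_pgtable_walk() with the hypothetical
 * count_leaf_cb visitor sketched above:
 *
 *	u64 count = 0;
 *	struct kvm_pgtable_walker walker = {
 *		.cb	= count_leaf_cb,
 *		.arg	= &count,
 *		.flags	= KVM_PGTABLE_WALK_LEAF,
 *	};
 *	int ret = kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker);
 */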

/**
 * kvm_pgtable_get_leaf() - Walk a page-table and retrieve the leaf entry
 *			    at the provided address.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init()
 *		or a similar initialiser.
 * @addr:	Input address for the start of the walk.
 * @ptep:	Pointer to storage for the retrieved PTE.
 * @level:	Pointer to storage for the level of the retrieved PTE.
 *
 * The offset of @addr within a page is ignored.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address specified, retrieving the leaf corresponding to this address.
 * Invalid entries are treated as leaf entries.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
			 kvm_pte_t *ptep, s8 *level);
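
/*
 * Illustrative sketch (hypothetical variables): look up the physical
 * address backing an IPA.
 *
 *	kvm_pte_t pte;
 *	s8 level;
 *
 *	if (!kvm_pgtable_get_leaf(pgt, ipa, &pte, &level) &&
 *	    kvm_pte_valid(pte))
 *		phys = kvm_pte_to_phys(pte);
 */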

/**
 * kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
 *				   stage-2 Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);

/**
 * kvm_pgtable_hyp_pte_prot() - Retrieve the protection attributes of a stage-1
 *				Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);

/**
 * kvm_tlb_flush_vmid_range() - Invalidate/flush a range of TLB entries
 *
 * @mmu:	Stage-2 KVM MMU struct
 * @addr:	The base Intermediate physical address from which to invalidate
 * @size:	Size of the range from the base to invalidate
 */
void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
			      phys_addr_t addr, size_t size);

#endif	/* __ARM64_KVM_PGTABLE_H__ */