1 // SPDX-License-Identifier: GPL-2.0-only
3 * tools/testing/selftests/kvm/lib/kvm_util.c
5 * Copyright (C) 2018, Google LLC.
8 #define _GNU_SOURCE /* for program_invocation_name */
11 #include "processor.h"
16 #include <sys/types.h>
19 #include <linux/kernel.h>
21 #define KVM_UTIL_MIN_PFN 2
23 static int vcpu_mmap_sz(void);
25 int open_path_or_exit(const char *path, int flags)
29 fd = open(path, flags);
30 __TEST_REQUIRE(fd >= 0, "%s not available (errno: %d)", path, errno);
36 * Open KVM_DEV_PATH if available, otherwise exit the entire program.
39 * flags - The flags to pass when opening KVM_DEV_PATH.
42 * The opened file descriptor of /dev/kvm.
44 static int _open_kvm_dev_path_or_exit(int flags)
46 return open_path_or_exit(KVM_DEV_PATH, flags);
49 int open_kvm_dev_path_or_exit(void)
51 return _open_kvm_dev_path_or_exit(O_RDONLY);
54 static bool get_module_param_bool(const char *module_name, const char *param)
56 const int path_size = 128;
62 r = snprintf(path, path_size, "/sys/module/%s/parameters/%s",
64 TEST_ASSERT(r < path_size,
65 "Failed to construct sysfs path in %d bytes.", path_size);
67 fd = open_path_or_exit(path, O_RDONLY);
69 r = read(fd, &value, 1);
70 TEST_ASSERT(r == 1, "read(%s) failed", path);
73 TEST_ASSERT(!r, "close(%s) failed", path);
77 else if (value == 'N')
80 TEST_FAIL("Unrecognized value '%c' for boolean module param", value);
83 bool get_kvm_param_bool(const char *param)
85 return get_module_param_bool("kvm", param);
88 bool get_kvm_intel_param_bool(const char *param)
90 return get_module_param_bool("kvm_intel", param);
93 bool get_kvm_amd_param_bool(const char *param)
95 return get_module_param_bool("kvm_amd", param);
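/*
 * Illustrative usage sketch (not part of the library): tests that depend on
 * a module parameter typically gate themselves on one of the helpers above.
 * The "nested" parameter name below is just an example.
 *
 *	__TEST_REQUIRE(get_kvm_intel_param_bool("nested"),
 *		       "nested virtualization not enabled in kvm_intel");
 */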
107 * On success, the value corresponding to the capability (KVM_CAP_*)
108 * specified by the value of cap. On failure, a TEST_ASSERT failure
111 * Looks up and returns the value corresponding to the capability
112 * (KVM_CAP_*) given by cap.
114 unsigned int kvm_check_cap(long cap)
119 kvm_fd = open_kvm_dev_path_or_exit();
120 ret = __kvm_ioctl(kvm_fd, KVM_CHECK_EXTENSION, (void *)cap);
121 TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));
125 return (unsigned int)ret;
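/*
 * Illustrative usage sketch (not part of the library): tests usually gate
 * optional features on a capability check before creating the VM, either via
 * kvm_check_cap() directly or via the kvm_has_cap() wrapper from the headers.
 *
 *	TEST_REQUIRE(kvm_has_cap(KVM_CAP_DIRTY_LOG_RING) ||
 *		     kvm_has_cap(KVM_CAP_DIRTY_LOG_RING_ACQ_REL));
 *	max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
 */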
128 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
130 if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL))
131 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size);
133 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size);
134 vm->dirty_ring_size = ring_size;
137 static void vm_open(struct kvm_vm *vm)
139 vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR);
141 TEST_REQUIRE(kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT));
143 vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type);
144 TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd));
147 const char *vm_guest_mode_string(uint32_t i)
149 static const char * const strings[] = {
150 [VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages",
151 [VM_MODE_P52V48_64K] = "PA-bits:52, VA-bits:48, 64K pages",
152 [VM_MODE_P48V48_4K] = "PA-bits:48, VA-bits:48, 4K pages",
153 [VM_MODE_P48V48_16K] = "PA-bits:48, VA-bits:48, 16K pages",
154 [VM_MODE_P48V48_64K] = "PA-bits:48, VA-bits:48, 64K pages",
155 [VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages",
156 [VM_MODE_P40V48_16K] = "PA-bits:40, VA-bits:48, 16K pages",
157 [VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages",
158 [VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages",
159 [VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages",
160 [VM_MODE_P44V64_4K] = "PA-bits:44, VA-bits:64, 4K pages",
161 [VM_MODE_P36V48_4K] = "PA-bits:36, VA-bits:48, 4K pages",
162 [VM_MODE_P36V48_16K] = "PA-bits:36, VA-bits:48, 16K pages",
163 [VM_MODE_P36V48_64K] = "PA-bits:36, VA-bits:48, 64K pages",
164 [VM_MODE_P36V47_16K] = "PA-bits:36, VA-bits:47, 16K pages",
166 _Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
167 "Missing new mode strings?");
169 TEST_ASSERT(i < NUM_VM_MODES, "Guest mode ID %d too big", i);
174 const struct vm_guest_mode_params vm_guest_mode_params[] = {
175 [VM_MODE_P52V48_4K] = { 52, 48, 0x1000, 12 },
176 [VM_MODE_P52V48_64K] = { 52, 48, 0x10000, 16 },
177 [VM_MODE_P48V48_4K] = { 48, 48, 0x1000, 12 },
178 [VM_MODE_P48V48_16K] = { 48, 48, 0x4000, 14 },
179 [VM_MODE_P48V48_64K] = { 48, 48, 0x10000, 16 },
180 [VM_MODE_P40V48_4K] = { 40, 48, 0x1000, 12 },
181 [VM_MODE_P40V48_16K] = { 40, 48, 0x4000, 14 },
182 [VM_MODE_P40V48_64K] = { 40, 48, 0x10000, 16 },
183 [VM_MODE_PXXV48_4K] = { 0, 0, 0x1000, 12 },
184 [VM_MODE_P47V64_4K] = { 47, 64, 0x1000, 12 },
185 [VM_MODE_P44V64_4K] = { 44, 64, 0x1000, 12 },
186 [VM_MODE_P36V48_4K] = { 36, 48, 0x1000, 12 },
187 [VM_MODE_P36V48_16K] = { 36, 48, 0x4000, 14 },
188 [VM_MODE_P36V48_64K] = { 36, 48, 0x10000, 16 },
189 [VM_MODE_P36V47_16K] = { 36, 47, 0x4000, 14 },
191 _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
192 "Missing new mode params?");
195 * Initializes vm->vpages_valid to match the canonical VA space of the
198 * The default implementation is valid for architectures which split the
199 * range addressed by a single page table into a low and high region
200 * based on the MSB of the VA. On architectures with this behavior
201 * the VA region spans [0, 2^(va_bits - 1)) and [-(2^(va_bits - 1)), -1].
203 __weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
205 sparsebit_set_num(vm->vpages_valid,
206 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
207 sparsebit_set_num(vm->vpages_valid,
208 (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
209 (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
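/*
 * Worked example of the split above, assuming va_bits == 48 and 4K pages:
 * the low half covers virtual pages [0, (1ULL << 47) >> 12), i.e. VAs
 * [0, 0x800000000000), and the high half starts at VA 0xffff800000000000
 * (page index ~((1ULL << 47) - 1) >> 12) and spans the same number of pages
 * up to the top of the address space.
 */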
212 struct kvm_vm *____vm_create(enum vm_guest_mode mode)
216 vm = calloc(1, sizeof(*vm));
217 TEST_ASSERT(vm != NULL, "Insufficient Memory");
219 INIT_LIST_HEAD(&vm->vcpus);
220 vm->regions.gpa_tree = RB_ROOT;
221 vm->regions.hva_tree = RB_ROOT;
222 hash_init(vm->regions.slot_hash);
227 vm->pa_bits = vm_guest_mode_params[mode].pa_bits;
228 vm->va_bits = vm_guest_mode_params[mode].va_bits;
229 vm->page_size = vm_guest_mode_params[mode].page_size;
230 vm->page_shift = vm_guest_mode_params[mode].page_shift;
232 /* Setup mode specific traits. */
234 case VM_MODE_P52V48_4K:
235 vm->pgtable_levels = 4;
237 case VM_MODE_P52V48_64K:
238 vm->pgtable_levels = 3;
240 case VM_MODE_P48V48_4K:
241 vm->pgtable_levels = 4;
243 case VM_MODE_P48V48_64K:
244 vm->pgtable_levels = 3;
246 case VM_MODE_P40V48_4K:
247 case VM_MODE_P36V48_4K:
248 vm->pgtable_levels = 4;
250 case VM_MODE_P40V48_64K:
251 case VM_MODE_P36V48_64K:
252 vm->pgtable_levels = 3;
254 case VM_MODE_P48V48_16K:
255 case VM_MODE_P40V48_16K:
256 case VM_MODE_P36V48_16K:
257 vm->pgtable_levels = 4;
259 case VM_MODE_P36V47_16K:
260 vm->pgtable_levels = 3;
262 case VM_MODE_PXXV48_4K:
264 kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
266 * Ignore KVM support for 5-level paging (vm->va_bits == 57),
267 * it doesn't take effect unless CR4.LA57 is set, which it
268 * isn't for this VM_MODE.
270 TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
271 "Linear address width (%d bits) not supported",
273 pr_debug("Guest physical address width detected: %d\n",
275 vm->pgtable_levels = 4;
278 TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
281 case VM_MODE_P47V64_4K:
282 vm->pgtable_levels = 5;
284 case VM_MODE_P44V64_4K:
285 vm->pgtable_levels = 5;
288 TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
292 if (vm->pa_bits != 40)
293 vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
298 /* Limit to VA-bit canonical virtual addresses. */
299 vm->vpages_valid = sparsebit_alloc();
300 vm_vaddr_populate_bitmap(vm);
302 /* Limit physical addresses to PA-bits. */
303 vm->max_gfn = vm_compute_max_gfn(vm);
305 /* Allocate and setup memory for guest. */
306 vm->vpages_mapped = sparsebit_alloc();
311 static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
312 uint32_t nr_runnable_vcpus,
313 uint64_t extra_mem_pages)
317 TEST_ASSERT(nr_runnable_vcpus,
318 "Use vm_create_barebones() for VMs that _never_ have vCPUs\n");
320 TEST_ASSERT(nr_runnable_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
321 "nr_vcpus = %d too large for host, max-vcpus = %d",
322 nr_runnable_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));
325 * Arbitrarily allocate 512 pages (2MiB when the page size is 4KiB) for the
326 * test code and other per-VM assets that will be loaded into memslot0.
330 /* Account for the per-vCPU stacks on behalf of the test. */
331 nr_pages += nr_runnable_vcpus * DEFAULT_STACK_PGS;
334 * Account for the number of pages needed for the page tables. The
335 * maximum page table size for a memory region will be when the
336 * smallest page size is used. Considering each page contains x page
337 * table descriptors, the total extra size for page tables (for extra
338 * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
341 nr_pages += (nr_pages + extra_mem_pages) / PTES_PER_MIN_PAGE * 2;
343 return vm_adjust_num_guest_pages(mode, nr_pages);
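/*
 * Worked example of the page table estimate above, assuming 4K pages so
 * that x = PTES_PER_MIN_PAGE = 512: the exact overhead for N data pages,
 * N/512 + N/512^2 + ..., is bounded by N/511, so reserving (N / 512) * 2
 * pages comfortably covers the leaf page tables plus all upper levels.
 */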
346 struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
347 uint64_t nr_extra_pages)
349 uint64_t nr_pages = vm_nr_pages_required(mode, nr_runnable_vcpus,
351 struct userspace_mem_region *slot0;
355 pr_debug("%s: mode='%s' pages='%ld'\n", __func__,
356 vm_guest_mode_string(mode), nr_pages);
358 vm = ____vm_create(mode);
360 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, 0);
361 for (i = 0; i < NR_MEM_REGIONS; i++)
364 kvm_vm_elf_load(vm, program_invocation_name);
367 * TODO: Add proper defines to protect the library's memslots, and then
368 * carve out memslot1 for the ucall MMIO address. KVM treats writes to
369 * read-only memslots as MMIO, and creating a read-only memslot for the
370 * MMIO region would prevent silently clobbering the MMIO region.
372 slot0 = memslot2region(vm, 0);
373 ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);
375 kvm_arch_vm_post_create(vm);
381 * VM Create with customized parameters
384 * mode - VM Mode (e.g. VM_MODE_P52V48_4K)
385 * nr_vcpus - VCPU count
386 * extra_mem_pages - Non-slot0 physical memory total size
387 * guest_code - Guest entry point
393 * Pointer to opaque structure that describes the created VM.
395 * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K).
396 * extra_mem_pages is only used to calculate the maximum page table size;
397 * no memory is actually allocated for non-slot0 memory in this function.
399 struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
400 uint64_t extra_mem_pages,
401 void *guest_code, struct kvm_vcpu *vcpus[])
406 TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array");
408 vm = __vm_create(mode, nr_vcpus, extra_mem_pages);
410 for (i = 0; i < nr_vcpus; ++i)
411 vcpus[i] = vm_vcpu_add(vm, i, guest_code);
416 struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
417 uint64_t extra_mem_pages,
420 struct kvm_vcpu *vcpus[1];
423 vm = __vm_create_with_vcpus(VM_MODE_DEFAULT, 1, extra_mem_pages,
434 * vm - VM that has been released before
438 * Reopens the file descriptors associated with the VM and reinstates the
439 * global state, such as the irqchip and the memory regions that are mapped
442 void kvm_vm_restart(struct kvm_vm *vmp)
445 struct userspace_mem_region *region;
448 if (vmp->has_irqchip)
449 vm_create_irqchip(vmp);
451 hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) {
452 int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
453 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
454 " rc: %i errno: %i\n"
455 " slot: %u flags: 0x%x\n"
456 " guest_phys_addr: 0x%llx size: 0x%llx",
457 ret, errno, region->region.slot,
458 region->region.flags,
459 region->region.guest_phys_addr,
460 region->region.memory_size);
464 __weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm,
467 return __vm_vcpu_add(vm, vcpu_id);
470 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm)
474 return vm_vcpu_recreate(vm, 0);
477 void kvm_pin_this_task_to_pcpu(uint32_t pcpu)
483 CPU_SET(pcpu, &mask);
484 r = sched_setaffinity(0, sizeof(mask), &mask);
485 TEST_ASSERT(!r, "sched_setaffinity() failed for pCPU '%u'.\n", pcpu);
488 static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
490 uint32_t pcpu = atoi_non_negative("CPU number", cpu_str);
492 TEST_ASSERT(CPU_ISSET(pcpu, allowed_mask),
493 "Not allowed to run on pCPU '%d', check cgroups?\n", pcpu);
497 void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
500 cpu_set_t allowed_mask;
501 char *cpu, *cpu_list;
505 cpu_list = strdup(pcpus_string);
506 TEST_ASSERT(cpu_list, "strdup() allocation failed.\n");
508 r = sched_getaffinity(0, sizeof(allowed_mask), &allowed_mask);
509 TEST_ASSERT(!r, "sched_getaffinity() failed");
511 cpu = strtok(cpu_list, delim);
513 /* 1. Get all pcpus for vcpus. */
514 for (i = 0; i < nr_vcpus; i++) {
515 TEST_ASSERT(cpu, "pCPU not provided for vCPU '%d'\n", i);
516 vcpu_to_pcpu[i] = parse_pcpu(cpu, &allowed_mask);
517 cpu = strtok(NULL, delim);
520 /* 2. Check if the main worker needs to be pinned. */
522 kvm_pin_this_task_to_pcpu(parse_pcpu(cpu, &allowed_mask));
523 cpu = strtok(NULL, delim);
526 TEST_ASSERT(!cpu, "pCPU list contains trailing garbage characters '%s'", cpu);
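/*
 * Illustrative example of the expected string format: for a test with three
 * vCPUs, "4,5,6" pins vCPU0-vCPU2 to pCPUs 4-6, while "4,5,6,0" additionally
 * pins the main (worker) task to pCPU 0.
 */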
531 * Userspace Memory Region Find
534 * vm - Virtual Machine
535 * start - Starting VM physical address
536 * end - Ending VM physical address, inclusive.
541 * Pointer to overlapping region, NULL if no such region.
543 * Searches for a region with any physical memory that overlaps with
544 * any portion of the guest physical addresses from start to end
545 * inclusive. If multiple overlapping regions exist, a pointer to any
546 * of the regions is returned. Null is returned only when no overlapping
549 static struct userspace_mem_region *
550 userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
552 struct rb_node *node;
554 for (node = vm->regions.gpa_tree.rb_node; node; ) {
555 struct userspace_mem_region *region =
556 container_of(node, struct userspace_mem_region, gpa_node);
557 uint64_t existing_start = region->region.guest_phys_addr;
558 uint64_t existing_end = region->region.guest_phys_addr
559 + region->region.memory_size - 1;
560 if (start <= existing_end && end >= existing_start)
563 if (start < existing_start)
564 node = node->rb_left;
566 node = node->rb_right;
573 * KVM Userspace Memory Region Find
576 * vm - Virtual Machine
577 * start - Starting VM physical address
578 * end - Ending VM physical address, inclusive.
583 * Pointer to overlapping region, NULL if no such region.
585 * Public interface to userspace_mem_region_find. Allows tests to look up
586 * the memslot data structure for a given range of guest physical memory.
588 struct kvm_userspace_memory_region *
589 kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
592 struct userspace_mem_region *region;
594 region = userspace_mem_region_find(vm, start, end);
598 return &region->region;
601 __weak void vcpu_arch_free(struct kvm_vcpu *vcpu)
610 * vcpu - VCPU to remove
614 * Return: None, TEST_ASSERT failures for all error conditions
616 * Removes a vCPU from a VM and frees its resources.
618 static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
622 if (vcpu->dirty_gfns) {
623 ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
624 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
625 vcpu->dirty_gfns = NULL;
628 ret = munmap(vcpu->run, vcpu_mmap_sz());
629 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
631 ret = close(vcpu->fd);
632 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
634 list_del(&vcpu->list);
636 vcpu_arch_free(vcpu);
640 void kvm_vm_release(struct kvm_vm *vmp)
642 struct kvm_vcpu *vcpu, *tmp;
645 list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
646 vm_vcpu_rm(vmp, vcpu);
648 ret = close(vmp->fd);
649 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
651 ret = close(vmp->kvm_fd);
652 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
655 static void __vm_mem_region_delete(struct kvm_vm *vm,
656 struct userspace_mem_region *region,
662 rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
663 rb_erase(&region->hva_node, &vm->regions.hva_tree);
664 hash_del(&region->slot_node);
667 region->region.memory_size = 0;
668 vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
670 sparsebit_free(&region->unused_phy_pages);
671 ret = munmap(region->mmap_start, region->mmap_size);
672 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
673 if (region->fd >= 0) {
674 /* There's an extra map when using shared memory. */
675 ret = munmap(region->mmap_alias, region->mmap_size);
676 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
684 * Destroys and frees the VM pointed to by vmp.
686 void kvm_vm_free(struct kvm_vm *vmp)
689 struct hlist_node *node;
690 struct userspace_mem_region *region;
695 /* Free cached stats metadata and close FD */
697 free(vmp->stats_desc);
698 close(vmp->stats_fd);
701 /* Free userspace_mem_regions. */
702 hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
703 __vm_mem_region_delete(vmp, region, false);
705 /* Free sparsebit arrays. */
706 sparsebit_free(&vmp->vpages_valid);
707 sparsebit_free(&vmp->vpages_mapped);
711 /* Free the structure describing the VM. */
715 int kvm_memfd_alloc(size_t size, bool hugepages)
717 int memfd_flags = MFD_CLOEXEC;
721 memfd_flags |= MFD_HUGETLB;
723 fd = memfd_create("kvm_selftest", memfd_flags);
724 TEST_ASSERT(fd != -1, __KVM_SYSCALL_ERROR("memfd_create()", fd));
726 r = ftruncate(fd, size);
727 TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("ftruncate()", r));
729 r = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, size);
730 TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r));
736 * Memory Compare, host virtual to guest virtual
739 * hva - Starting host virtual address
740 * vm - Virtual Machine
741 * gva - Starting guest virtual address
742 * len - number of bytes to compare
746 * Input/Output Args: None
749 * Returns 0 if the bytes starting at hva for a length of len
750 * are equal to the guest virtual bytes starting at gva. Returns
751 * a value < 0, if bytes at hva are less than those at gva.
752 * Otherwise a value > 0 is returned.
754 * Compares the bytes starting at the host virtual address hva, for
755 * a length of len, to the guest bytes starting at the guest virtual
756 * address given by gva.
758 int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
763 * Compare a batch of bytes until either a match is found
764 * or all the bytes have been compared.
766 for (uintptr_t offset = 0; offset < len; offset += amt) {
767 uintptr_t ptr1 = (uintptr_t)hva + offset;
770 * Determine host address for guest virtual address
773 uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);
776 * Determine amount to compare on this pass.
777 * Don't allow the comparison to cross a page boundary.
780 if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
781 amt = vm->page_size - (ptr1 % vm->page_size);
782 if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
783 amt = vm->page_size - (ptr2 % vm->page_size);
785 assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
786 assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));
789 * Perform the comparison. If there is a difference
790 * return that result to the caller, otherwise
791 * continue looking for a mismatch.
793 int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
799 * No mismatch found. Let the caller know the two memory
805 static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree,
806 struct userspace_mem_region *region)
808 struct rb_node **cur, *parent;
810 for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) {
811 struct userspace_mem_region *cregion;
813 cregion = container_of(*cur, typeof(*cregion), gpa_node);
815 if (region->region.guest_phys_addr <
816 cregion->region.guest_phys_addr)
817 cur = &(*cur)->rb_left;
819 TEST_ASSERT(region->region.guest_phys_addr !=
820 cregion->region.guest_phys_addr,
821 "Duplicate GPA in region tree");
823 cur = &(*cur)->rb_right;
827 rb_link_node(&region->gpa_node, parent, cur);
828 rb_insert_color(&region->gpa_node, gpa_tree);
831 static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree,
832 struct userspace_mem_region *region)
834 struct rb_node **cur, *parent;
836 for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) {
837 struct userspace_mem_region *cregion;
839 cregion = container_of(*cur, typeof(*cregion), hva_node);
841 if (region->host_mem < cregion->host_mem)
842 cur = &(*cur)->rb_left;
844 TEST_ASSERT(region->host_mem !=
846 "Duplicate HVA in region tree");
848 cur = &(*cur)->rb_right;
852 rb_link_node(&region->hva_node, parent, cur);
853 rb_insert_color(&region->hva_node, hva_tree);
857 int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
858 uint64_t gpa, uint64_t size, void *hva)
860 struct kvm_userspace_memory_region region = {
863 .guest_phys_addr = gpa,
865 .userspace_addr = (uintptr_t)hva,
868 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region);
871 void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
872 uint64_t gpa, uint64_t size, void *hva)
874 int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva);
876 TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed, errno = %d (%s)",
877 errno, strerror(errno));
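/*
 * Illustrative sketch (slot, GPA and size are arbitrary example values, and
 * "hva" is assumed to be a suitably sized host buffer): the "__" variant
 * returns the raw ioctl result so tests can check for expected failures,
 * whereas the plain variant asserts success internally; a size of 0 deletes
 * the slot.
 *
 *	int r = __vm_set_user_memory_region(vm, 1, 0, 1ull << 32, 0x10000, hva);
 *	TEST_ASSERT(!r, "Creating slot 1 should succeed");
 *	vm_set_user_memory_region(vm, 1, 0, 1ull << 32, 0, NULL);
 */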
881 * VM Userspace Memory Region Add
884 * vm - Virtual Machine
885 * src_type - Storage source for this region.
886 * Use VM_MEM_SRC_ANONYMOUS for anonymous memory.
887 * guest_paddr - Starting guest physical address
888 * slot - KVM region slot
889 * npages - Number of physical pages
890 * flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
896 * Allocates a memory area of the number of pages specified by npages
897 * and maps it to the VM specified by vm, at a starting physical address
898 * given by guest_paddr. The region is created with a KVM region slot
899 * given by slot, which must be unique and < KVM_MEM_SLOTS_NUM. The
900 * region is created with the flags given by flags.
902 void vm_userspace_mem_region_add(struct kvm_vm *vm,
903 enum vm_mem_backing_src_type src_type,
904 uint64_t guest_paddr, uint32_t slot, uint64_t npages,
908 struct userspace_mem_region *region;
909 size_t backing_src_pagesz = get_backing_src_pagesz(src_type);
912 TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
913 "Number of guest pages is not compatible with the host. "
914 "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));
916 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
917 "address not on a page boundary.\n"
918 " guest_paddr: 0x%lx vm->page_size: 0x%x",
919 guest_paddr, vm->page_size);
920 TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
921 <= vm->max_gfn, "Physical range beyond maximum "
922 "supported physical address,\n"
923 " guest_paddr: 0x%lx npages: 0x%lx\n"
924 " vm->max_gfn: 0x%lx vm->page_size: 0x%x",
925 guest_paddr, npages, vm->max_gfn, vm->page_size);
928 * Confirm a mem region with an overlapping address doesn't
931 region = (struct userspace_mem_region *) userspace_mem_region_find(
932 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
934 TEST_FAIL("overlapping userspace_mem_region already "
936 " requested guest_paddr: 0x%lx npages: 0x%lx "
938 " existing guest_paddr: 0x%lx size: 0x%lx",
939 guest_paddr, npages, vm->page_size,
940 (uint64_t) region->region.guest_phys_addr,
941 (uint64_t) region->region.memory_size);
943 /* Confirm no region with the requested slot already exists. */
944 hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
946 if (region->region.slot != slot)
949 TEST_FAIL("A mem region with the requested slot "
951 " requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
952 " existing slot: %u paddr: 0x%lx size: 0x%lx",
953 slot, guest_paddr, npages,
955 (uint64_t) region->region.guest_phys_addr,
956 (uint64_t) region->region.memory_size);
959 /* Allocate and initialize new mem region structure. */
960 region = calloc(1, sizeof(*region));
961 TEST_ASSERT(region != NULL, "Insufficient Memory");
962 region->mmap_size = npages * vm->page_size;
965 /* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
966 alignment = 0x100000;
972 * When using THP, mmap is not guaranteed to return a hugepage-aligned
973 * address so we have to pad the mmap. Padding is not needed for HugeTLB
974 * because mmap will always return an address aligned to the HugeTLB
977 if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
978 alignment = max(backing_src_pagesz, alignment);
980 ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));
982 /* Add enough memory to align up if necessary */
984 region->mmap_size += alignment;
987 if (backing_src_is_shared(src_type))
988 region->fd = kvm_memfd_alloc(region->mmap_size,
989 src_type == VM_MEM_SRC_SHARED_HUGETLB);
991 region->mmap_start = mmap(NULL, region->mmap_size,
992 PROT_READ | PROT_WRITE,
993 vm_mem_backing_src_alias(src_type)->flag,
995 TEST_ASSERT(region->mmap_start != MAP_FAILED,
996 __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
998 TEST_ASSERT(!is_backing_src_hugetlb(src_type) ||
999 region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz),
1000 "mmap_start %p is not aligned to HugeTLB page size 0x%lx",
1001 region->mmap_start, backing_src_pagesz);
1003 /* Align host address */
1004 region->host_mem = align_ptr_up(region->mmap_start, alignment);
1006 /* As needed perform madvise */
1007 if ((src_type == VM_MEM_SRC_ANONYMOUS ||
1008 src_type == VM_MEM_SRC_ANONYMOUS_THP) && thp_configured()) {
1009 ret = madvise(region->host_mem, npages * vm->page_size,
1010 src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
1011 TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %s",
1012 region->host_mem, npages * vm->page_size,
1013 vm_mem_backing_src_alias(src_type)->name);
1016 region->backing_src_type = src_type;
1017 region->unused_phy_pages = sparsebit_alloc();
1018 sparsebit_set_num(region->unused_phy_pages,
1019 guest_paddr >> vm->page_shift, npages);
1020 region->region.slot = slot;
1021 region->region.flags = flags;
1022 region->region.guest_phys_addr = guest_paddr;
1023 region->region.memory_size = npages * vm->page_size;
1024 region->region.userspace_addr = (uintptr_t) region->host_mem;
1025 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
1026 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
1027 " rc: %i errno: %i\n"
1028 " slot: %u flags: 0x%x\n"
1029 " guest_phys_addr: 0x%lx size: 0x%lx",
1030 ret, errno, slot, flags,
1031 guest_paddr, (uint64_t) region->region.memory_size);
1033 /* Add to quick lookup data structures */
1034 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region);
1035 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region);
1036 hash_add(vm->regions.slot_hash, &region->slot_node, slot);
1038 /* If shared memory, create an alias. */
1039 if (region->fd >= 0) {
1040 region->mmap_alias = mmap(NULL, region->mmap_size,
1041 PROT_READ | PROT_WRITE,
1042 vm_mem_backing_src_alias(src_type)->flag,
1044 TEST_ASSERT(region->mmap_alias != MAP_FAILED,
1045 __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
1047 /* Align host alias address */
1048 region->host_alias = align_ptr_up(region->mmap_alias, alignment);
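/*
 * Illustrative usage sketch (memslot number, GPA, page count and flags are
 * arbitrary example values): back 64 guest pages at GPA 0x10000000 with
 * anonymous memory in memslot 1 and enable dirty logging on it.
 *
 *	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 *				    0x10000000, 1, 64,
 *				    KVM_MEM_LOG_DIRTY_PAGES);
 */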
1056 * vm - Virtual Machine
1057 * memslot - KVM memory slot ID
1062 * Pointer to memory region structure that describe memory region
1063 * using kvm memory slot ID given by memslot. TEST_ASSERT failure
1064 * on error (e.g. currently no memory region using memslot as a KVM
1067 struct userspace_mem_region *
1068 memslot2region(struct kvm_vm *vm, uint32_t memslot)
1070 struct userspace_mem_region *region;
1072 hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
1074 if (region->region.slot == memslot)
1077 fprintf(stderr, "No mem region with the requested slot found,\n"
1078 " requested slot: %u\n", memslot);
1079 fputs("---- vm dump ----\n", stderr);
1080 vm_dump(stderr, vm, 2);
1081 TEST_FAIL("Mem region not found");
1086 * VM Memory Region Flags Set
1089 * vm - Virtual Machine
1090 * flags - New memslot flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
1096 * Sets the flags of the memory region specified by the value of slot,
1097 * to the values given by flags.
1099 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
1102 struct userspace_mem_region *region;
1104 region = memslot2region(vm, slot);
1106 region->region.flags = flags;
1108 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
1110 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
1111 " rc: %i errno: %i slot: %u flags: 0x%x",
1112 ret, errno, slot, flags);
1116 * VM Memory Region Move
1119 * vm - Virtual Machine
1120 * slot - Slot of the memory region to move
1121 * new_gpa - Starting guest physical address
1127 * Change the gpa of a memory region.
1129 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
1131 struct userspace_mem_region *region;
1134 region = memslot2region(vm, slot);
1136 region->region.guest_phys_addr = new_gpa;
1138 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
1140 TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed\n"
1141 "ret: %i errno: %i slot: %u new_gpa: 0x%lx",
1142 ret, errno, slot, new_gpa);
1146 * VM Memory Region Delete
1149 * vm - Virtual Machine
1150 * slot - Slot of the memory region to delete
1156 * Delete a memory region.
1158 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
1160 __vm_mem_region_delete(vm, memslot2region(vm, slot), true);
1163 /* Returns the size of a vCPU's kvm_run structure. */
1164 static int vcpu_mmap_sz(void)
1168 dev_fd = open_kvm_dev_path_or_exit();
1170 ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
1171 TEST_ASSERT(ret >= sizeof(struct kvm_run),
1172 KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, ret));
1179 static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)
1181 struct kvm_vcpu *vcpu;
1183 list_for_each_entry(vcpu, &vm->vcpus, list) {
1184 if (vcpu->id == vcpu_id)
1192 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
1193 * No additional vCPU setup is done. Returns the vCPU.
1195 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
1197 struct kvm_vcpu *vcpu;
1199 /* Confirm a vcpu with the specified id doesn't already exist. */
1200 TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists\n", vcpu_id);
1202 /* Allocate and initialize new vcpu structure. */
1203 vcpu = calloc(1, sizeof(*vcpu));
1204 TEST_ASSERT(vcpu != NULL, "Insufficient Memory");
1208 vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id);
1209 TEST_ASSERT(vcpu->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu->fd));
1211 TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size "
1212 "smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
1213 vcpu_mmap_sz(), sizeof(*vcpu->run));
1214 vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(),
1215 PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
1216 TEST_ASSERT(vcpu->run != MAP_FAILED,
1217 __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
1219 /* Add to linked-list of VCPUs. */
1220 list_add(&vcpu->list, &vm->vcpus);
1226 * VM Virtual Address Unused Gap
1229 * vm - Virtual Machine
1231 * vaddr_min - Minimum Virtual Address
1236 * Lowest virtual address at or below vaddr_min, with at least
1237 * sz unused bytes. TEST_ASSERT failure if no area of at least
1238 * size sz is available.
1240 * Within the VM specified by vm, locates the lowest starting virtual
1241 * address >= vaddr_min, that has at least sz unallocated bytes. A
1242 * TEST_ASSERT failure occurs for invalid input or no area of at least
1243 * sz unallocated bytes >= vaddr_min is available.
1245 vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
1246 vm_vaddr_t vaddr_min)
1248 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;
1250 /* Determine lowest permitted virtual page index. */
1251 uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
1252 if ((pgidx_start * vm->page_size) < vaddr_min)
1255 /* Loop over section with enough valid virtual page indexes. */
1256 if (!sparsebit_is_set_num(vm->vpages_valid,
1257 pgidx_start, pages))
1258 pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
1259 pgidx_start, pages);
1262 * Are there enough unused virtual pages available at
1263 * the currently proposed starting virtual page index.
1264 * If not, adjust proposed starting index to next
1267 if (sparsebit_is_clear_num(vm->vpages_mapped,
1268 pgidx_start, pages))
1270 pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
1271 pgidx_start, pages);
1272 if (pgidx_start == 0)
1276 * If needed, adjust proposed starting virtual address,
1277 * to next range of valid virtual addresses.
1279 if (!sparsebit_is_set_num(vm->vpages_valid,
1280 pgidx_start, pages)) {
1281 pgidx_start = sparsebit_next_set_num(
1282 vm->vpages_valid, pgidx_start, pages);
1283 if (pgidx_start == 0)
1286 } while (pgidx_start != 0);
1289 TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);
1295 TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
1296 pgidx_start, pages),
1297 "Unexpected, invalid virtual page index range,\n"
1298 " pgidx_start: 0x%lx\n"
1300 pgidx_start, pages);
1301 TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
1302 pgidx_start, pages),
1303 "Unexpected, pages already mapped,\n"
1304 " pgidx_start: 0x%lx\n"
1306 pgidx_start, pages);
1308 return pgidx_start * vm->page_size;
1311 vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
1312 enum kvm_mem_region_type type)
1314 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
1317 vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages,
1318 KVM_UTIL_MIN_PFN * vm->page_size,
1319 vm->memslots[type]);
1322 * Find an unused range of virtual page addresses of at least
1325 vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);
1327 /* Map the virtual pages. */
1328 for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
1329 pages--, vaddr += vm->page_size, paddr += vm->page_size) {
1331 virt_pg_map(vm, vaddr, paddr);
1333 sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
1340 * VM Virtual Address Allocate
1343 * vm - Virtual Machine
1344 * sz - Size in bytes
1345 * vaddr_min - Minimum starting virtual address
1350 * Starting guest virtual address
1352 * Allocates at least sz bytes within the virtual address space of the vm
1353 * given by vm. The allocated bytes are mapped to a virtual address >=
1354 * the address given by vaddr_min. Note that each allocation uses a
1355 * unique set of pages, with the minimum real allocation being at least
1356 * a page. The allocated physical space comes from the TEST_DATA memory region.
1358 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
1360 return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA);
1364 * VM Virtual Address Allocate Pages
1367 * vm - Virtual Machine
1372 * Starting guest virtual address
1374 * Allocates at least N system pages worth of bytes within the virtual address
1377 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)
1379 return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
1382 vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type)
1384 return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type);
1388 * VM Virtual Address Allocate Page
1391 * vm - Virtual Machine
1396 * Starting guest virtual address
1398 * Allocates at least one system page worth of bytes within the virtual address
1401 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm)
1403 return vm_vaddr_alloc_pages(vm, 1);
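/*
 * Illustrative sketch (size is an example value): allocate a guest buffer
 * and initialize it from the host via the GVA->HVA translation helper.
 *
 *	vm_vaddr_t gva = vm_vaddr_alloc(vm, 2 * getpagesize(), KVM_UTIL_MIN_VADDR);
 *
 *	memset(addr_gva2hva(vm, gva), 0xaa, 2 * getpagesize());
 */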
1407 * Map a range of VM virtual address to the VM's physical address
1410 * vm - Virtual Machine
1411 * vaddr - Virtual address to map
1412 * paddr - VM Physical Address
1413 * npages - The number of pages to map
1419 * Within the VM given by @vm, creates a virtual translation for
1420 * @npages starting at @vaddr to the page range starting at @paddr.
1422 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
1423 unsigned int npages)
1425 size_t page_size = vm->page_size;
1426 size_t size = npages * page_size;
1428 TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
1429 TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
1432 virt_pg_map(vm, vaddr, paddr);
1433 sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
1441 * Address VM Physical to Host Virtual
1444 * vm - Virtual Machine
1445 * gpa - VM physical address
1450 * Equivalent host virtual address
1452 * Locates the memory region containing the VM physical address given
1453 * by gpa, within the VM given by vm. When found, the host virtual
1454 * address providing the memory to the vm physical address is returned.
1455 * A TEST_ASSERT failure occurs if no region containing gpa exists.
1457 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
1459 struct userspace_mem_region *region;
1461 region = userspace_mem_region_find(vm, gpa, gpa);
1463 TEST_FAIL("No vm physical memory at 0x%lx", gpa);
1467 return (void *)((uintptr_t)region->host_mem
1468 + (gpa - region->region.guest_phys_addr));
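/*
 * Illustrative sketch: tests commonly use this to poke guest-physical memory
 * directly from the host, e.g. to seed a value the guest will later read
 * (the GPA below is assumed to come from the test's own memory layout).
 *
 *	uint64_t *hva = addr_gpa2hva(vm, gpa);
 *
 *	*hva = 0xdeadbeefull;
 */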
1472 * Address Host Virtual to VM Physical
1475 * vm - Virtual Machine
1476 * hva - Host virtual address
1481 * Equivalent VM physical address
1483 * Locates the memory region containing the host virtual address given
1484 * by hva, within the VM given by vm. When found, the equivalent
1485 * VM physical address is returned. A TEST_ASSERT failure occurs if no
1486 * region containing hva exists.
1488 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
1490 struct rb_node *node;
1492 for (node = vm->regions.hva_tree.rb_node; node; ) {
1493 struct userspace_mem_region *region =
1494 container_of(node, struct userspace_mem_region, hva_node);
1496 if (hva >= region->host_mem) {
1497 if (hva <= (region->host_mem
1498 + region->region.memory_size - 1))
1499 return (vm_paddr_t)((uintptr_t)
1500 region->region.guest_phys_addr
1501 + (hva - (uintptr_t)region->host_mem));
1503 node = node->rb_right;
1505 node = node->rb_left;
1508 TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
1513 * Address VM physical to Host Virtual *alias*.
1516 * vm - Virtual Machine
1517 * gpa - VM physical address
1522 * Equivalent address within the host virtual *alias* area, or NULL
1523 * (without failing the test) if the guest memory is not shared (so
1526 * Create a writable, shared virtual=>physical alias for the specific GPA.
1527 * The primary use case is to allow the host selftest to manipulate guest
1528 * memory without mapping said memory in the guest's address space. And, for
1529 * userfaultfd-based demand paging, to do so without triggering userfaults.
1531 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
1533 struct userspace_mem_region *region;
1536 region = userspace_mem_region_find(vm, gpa, gpa);
1540 if (!region->host_alias)
1543 offset = gpa - region->region.guest_phys_addr;
1544 return (void *) ((uintptr_t) region->host_alias + offset);
1547 /* Create an interrupt controller chip for the specified VM. */
1548 void vm_create_irqchip(struct kvm_vm *vm)
1550 vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL);
1552 vm->has_irqchip = true;
1555 int _vcpu_run(struct kvm_vcpu *vcpu)
1560 rc = __vcpu_run(vcpu);
1561 } while (rc == -1 && errno == EINTR);
1563 assert_on_unhandled_exception(vcpu);
1569 * Invoke KVM_RUN on a vCPU until KVM returns something other than -EINTR.
1570 * Assert if KVM returns an error (other than -EINTR).
1572 void vcpu_run(struct kvm_vcpu *vcpu)
1574 int ret = _vcpu_run(vcpu);
1576 TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_RUN, ret));
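/*
 * Illustrative sketch of the usual run loop, assuming the guest reports
 * progress through the ucall mechanism (see ucall_common.h); the exact
 * handling of UCALL_ABORT varies from test to test.
 *
 *	struct ucall uc;
 *
 *	for (;;) {
 *		vcpu_run(vcpu);
 *		switch (get_ucall(vcpu, &uc)) {
 *		case UCALL_SYNC:
 *			continue;
 *		case UCALL_ABORT:
 *			TEST_FAIL("Guest aborted");
 *		case UCALL_DONE:
 *			return;
 *		default:
 *			TEST_FAIL("Unexpected ucall");
 *		}
 *	}
 */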
1579 void vcpu_run_complete_io(struct kvm_vcpu *vcpu)
1583 vcpu->run->immediate_exit = 1;
1584 ret = __vcpu_run(vcpu);
1585 vcpu->run->immediate_exit = 0;
1587 TEST_ASSERT(ret == -1 && errno == EINTR,
1588 "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
1593 * Get the list of guest registers which are supported for
1594 * KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. Returns a kvm_reg_list pointer;
1595 * it is the caller's responsibility to free the list.
1597 struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu)
1599 struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list;
1602 ret = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &reg_list_n);
1603 TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0");
1605 reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64));
1606 reg_list->n = reg_list_n.n;
1607 vcpu_ioctl(vcpu, KVM_GET_REG_LIST, reg_list);
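/*
 * Illustrative sketch: callers iterate reg_list->reg[0..n) and must free the
 * list themselves.
 *
 *	struct kvm_reg_list *list = vcpu_get_reg_list(vcpu);
 *	__u64 i;
 *
 *	for (i = 0; i < list->n; i++)
 *		pr_info("reg id: 0x%llx\n", (unsigned long long)list->reg[i]);
 *	free(list);
 */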
1611 void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
1613 uint32_t page_size = getpagesize();
1614 uint32_t size = vcpu->vm->dirty_ring_size;
1616 TEST_ASSERT(size > 0, "Should enable dirty ring first");
1618 if (!vcpu->dirty_gfns) {
1621 addr = mmap(NULL, size, PROT_READ, MAP_PRIVATE, vcpu->fd,
1622 page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
1623 TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private");
1625 addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, vcpu->fd,
1626 page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
1627 TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");
1629 addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
1630 page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
1631 TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");
1633 vcpu->dirty_gfns = addr;
1634 vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn);
1637 return vcpu->dirty_gfns;
1644 int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
1646 struct kvm_device_attr attribute = {
1652 return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute);
1655 int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type)
1657 struct kvm_create_device create_dev = {
1659 .flags = KVM_CREATE_DEVICE_TEST,
1662 return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
1665 int __kvm_create_device(struct kvm_vm *vm, uint64_t type)
1667 struct kvm_create_device create_dev = {
1674 err = __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
1675 TEST_ASSERT(err <= 0, "KVM_CREATE_DEVICE shouldn't return a positive value");
1676 return err ? : create_dev.fd;
1679 int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val)
1681 struct kvm_device_attr kvmattr = {
1685 .addr = (uintptr_t)val,
1688 return __kvm_ioctl(dev_fd, KVM_GET_DEVICE_ATTR, &kvmattr);
1691 int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val)
1693 struct kvm_device_attr kvmattr = {
1697 .addr = (uintptr_t)val,
1700 return __kvm_ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &kvmattr);
1704 * IRQ related functions.
1707 int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
1709 struct kvm_irq_level irq_level = {
1714 return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
1717 void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
1719 int ret = _kvm_irq_line(vm, irq, level);
1721 TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret));
1724 struct kvm_irq_routing *kvm_gsi_routing_create(void)
1726 struct kvm_irq_routing *routing;
1729 size = sizeof(struct kvm_irq_routing);
1730 /* Allocate space for the max number of entries: this wastes 196 KBs. */
1731 size += KVM_MAX_IRQ_ROUTES * sizeof(struct kvm_irq_routing_entry);
1732 routing = calloc(1, size);
1738 void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
1739 uint32_t gsi, uint32_t pin)
1744 assert(routing->nr < KVM_MAX_IRQ_ROUTES);
1747 routing->entries[i].gsi = gsi;
1748 routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
1749 routing->entries[i].flags = 0;
1750 routing->entries[i].u.irqchip.irqchip = 0;
1751 routing->entries[i].u.irqchip.pin = pin;
1755 int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
1760 ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing);
1766 void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
1770 ret = _kvm_gsi_routing_write(vm, routing);
1771 TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_GSI_ROUTING, ret));
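/*
 * Illustrative usage sketch (GSI and pin numbers are arbitrary examples).
 * Note that the write helpers free the routing table, so it must not be
 * reused afterwards.
 *
 *	struct kvm_irq_routing *routing = kvm_gsi_routing_create();
 *
 *	kvm_gsi_routing_irqchip_add(routing, 32, 0);
 *	kvm_gsi_routing_irqchip_add(routing, 33, 1);
 *	kvm_gsi_routing_write(vm, routing);
 */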
1778 * vm - Virtual Machine
1779 * indent - Left margin indent amount
1782 * stream - Output FILE stream
1786 * Dumps the current state of the VM given by vm, to the FILE stream
1789 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
1792 struct userspace_mem_region *region;
1793 struct kvm_vcpu *vcpu;
1795 fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
1796 fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
1797 fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
1798 fprintf(stream, "%*sMem Regions:\n", indent, "");
1799 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
1800 fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
1801 "host_virt: %p\n", indent + 2, "",
1802 (uint64_t) region->region.guest_phys_addr,
1803 (uint64_t) region->region.memory_size,
1805 fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
1806 sparsebit_dump(stream, region->unused_phy_pages, 0);
1808 fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
1809 sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
1810 fprintf(stream, "%*spgd_created: %u\n", indent, "",
1812 if (vm->pgd_created) {
1813 fprintf(stream, "%*sVirtual Translation Tables:\n",
1815 virt_dump(stream, vm, indent + 4);
1817 fprintf(stream, "%*sVCPUs:\n", indent, "");
1819 list_for_each_entry(vcpu, &vm->vcpus, list)
1820 vcpu_dump(stream, vcpu, indent + 2);
1823 #define KVM_EXIT_STRING(x) {KVM_EXIT_##x, #x}
1825 /* Known KVM exit reasons */
1826 static struct exit_reason {
1827 unsigned int reason;
1829 } exit_reasons_known[] = {
1830 KVM_EXIT_STRING(UNKNOWN),
1831 KVM_EXIT_STRING(EXCEPTION),
1832 KVM_EXIT_STRING(IO),
1833 KVM_EXIT_STRING(HYPERCALL),
1834 KVM_EXIT_STRING(DEBUG),
1835 KVM_EXIT_STRING(HLT),
1836 KVM_EXIT_STRING(MMIO),
1837 KVM_EXIT_STRING(IRQ_WINDOW_OPEN),
1838 KVM_EXIT_STRING(SHUTDOWN),
1839 KVM_EXIT_STRING(FAIL_ENTRY),
1840 KVM_EXIT_STRING(INTR),
1841 KVM_EXIT_STRING(SET_TPR),
1842 KVM_EXIT_STRING(TPR_ACCESS),
1843 KVM_EXIT_STRING(S390_SIEIC),
1844 KVM_EXIT_STRING(S390_RESET),
1845 KVM_EXIT_STRING(DCR),
1846 KVM_EXIT_STRING(NMI),
1847 KVM_EXIT_STRING(INTERNAL_ERROR),
1848 KVM_EXIT_STRING(OSI),
1849 KVM_EXIT_STRING(PAPR_HCALL),
1850 KVM_EXIT_STRING(S390_UCONTROL),
1851 KVM_EXIT_STRING(WATCHDOG),
1852 KVM_EXIT_STRING(S390_TSCH),
1853 KVM_EXIT_STRING(EPR),
1854 KVM_EXIT_STRING(SYSTEM_EVENT),
1855 KVM_EXIT_STRING(S390_STSI),
1856 KVM_EXIT_STRING(IOAPIC_EOI),
1857 KVM_EXIT_STRING(HYPERV),
1858 KVM_EXIT_STRING(ARM_NISV),
1859 KVM_EXIT_STRING(X86_RDMSR),
1860 KVM_EXIT_STRING(X86_WRMSR),
1861 KVM_EXIT_STRING(DIRTY_RING_FULL),
1862 KVM_EXIT_STRING(AP_RESET_HOLD),
1863 KVM_EXIT_STRING(X86_BUS_LOCK),
1864 KVM_EXIT_STRING(XEN),
1865 KVM_EXIT_STRING(RISCV_SBI),
1866 KVM_EXIT_STRING(RISCV_CSR),
1867 KVM_EXIT_STRING(NOTIFY),
1868 #ifdef KVM_EXIT_MEMORY_NOT_PRESENT
1869 KVM_EXIT_STRING(MEMORY_NOT_PRESENT),
1874 * Exit Reason String
1877 * exit_reason - Exit reason
1882 * Constant string pointer describing the exit reason.
1884 * Locates and returns a constant string that describes the KVM exit
1885 * reason given by exit_reason. If no such string is found, a constant
1886 * string of "Unknown" is returned.
1888 const char *exit_reason_str(unsigned int exit_reason)
1892 for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) {
1893 if (exit_reason == exit_reasons_known[n1].reason)
1894 return exit_reasons_known[n1].name;
1901 * Physical Contiguous Page Allocator
1904 * vm - Virtual Machine
1905 * num - number of pages
1906 * paddr_min - Physical address minimum
1907 * memslot - Memory region to allocate page from
1912 * Starting physical address
1914 * Within the VM specified by vm, locates a range of available physical
1915 * pages at or above paddr_min. If found, the pages are marked as in use
1916 * and their base address is returned. A TEST_ASSERT failure occurs if
1917 * not enough pages are available at or above paddr_min.
1919 vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
1920 vm_paddr_t paddr_min, uint32_t memslot)
1922 struct userspace_mem_region *region;
1923 sparsebit_idx_t pg, base;
1925 TEST_ASSERT(num > 0, "Must allocate at least one page");
1927 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
1928 "not divisible by page size.\n"
1929 " paddr_min: 0x%lx page_size: 0x%x",
1930 paddr_min, vm->page_size);
1932 region = memslot2region(vm, memslot);
1933 base = pg = paddr_min >> vm->page_shift;
1936 for (; pg < base + num; ++pg) {
1937 if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
1938 base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
1942 } while (pg && pg != base + num);
1945 fprintf(stderr, "No guest physical page available, "
1946 "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
1947 paddr_min, vm->page_size, memslot);
1948 fputs("---- vm dump ----\n", stderr);
1949 vm_dump(stderr, vm, 2);
1953 for (pg = base; pg < base + num; ++pg)
1954 sparsebit_clear(region->unused_phy_pages, pg);
1956 return base * vm->page_size;
1959 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
1962 return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
1965 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
1967 return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
1968 vm->memslots[MEM_REGION_PT]);
1972 * Address Guest Virtual to Host Virtual
1975 * vm - Virtual Machine
1976 * gva - VM virtual address
1981 * Equivalent host virtual address
1983 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
1985 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
1988 unsigned long __weak vm_compute_max_gfn(struct kvm_vm *vm)
1990 return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
1993 static unsigned int vm_calc_num_pages(unsigned int num_pages,
1994 unsigned int page_shift,
1995 unsigned int new_page_shift,
1998 unsigned int n = 1 << (new_page_shift - page_shift);
2000 if (page_shift >= new_page_shift)
2001 return num_pages * (1 << (page_shift - new_page_shift));
2003 return num_pages / n + !!(ceil && num_pages % n);
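/*
 * Worked example: converting 3 guest pages with page_shift 16 (64K pages) to
 * host pages with new_page_shift 12 (4K pages) takes the first branch and
 * yields 3 * (1 << 4) = 48 pages; converting 49 4K host pages back to 64K
 * guest pages with ceil == false yields 49 / 16 = 3 (with ceil == true it
 * would round up to 4).
 */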
2006 static inline int getpageshift(void)
2008 return __builtin_ffs(getpagesize()) - 1;
2012 vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
2014 return vm_calc_num_pages(num_guest_pages,
2015 vm_guest_mode_params[mode].page_shift,
2016 getpageshift(), true);
2020 vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
2022 return vm_calc_num_pages(num_host_pages, getpageshift(),
2023 vm_guest_mode_params[mode].page_shift, false);
2026 unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size)
2029 n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size);
2030 return vm_adjust_num_guest_pages(mode, n);
2034 * Read binary stats descriptors
2037 * stats_fd - the file descriptor for the binary stats file from which to read
2038 * header - the binary stats metadata header corresponding to the given FD
2043 * A pointer to a newly allocated series of stat descriptors.
2044 * Caller is responsible for freeing the returned kvm_stats_desc.
2046 * Read the stats descriptors from the binary stats interface.
2048 struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
2049 struct kvm_stats_header *header)
2051 struct kvm_stats_desc *stats_desc;
2052 ssize_t desc_size, total_size, ret;
2054 desc_size = get_stats_descriptor_size(header);
2055 total_size = header->num_desc * desc_size;
2057 stats_desc = calloc(header->num_desc, desc_size);
2058 TEST_ASSERT(stats_desc, "Allocate memory for stats descriptors");
2060 ret = pread(stats_fd, stats_desc, total_size, header->desc_offset);
2061 TEST_ASSERT(ret == total_size, "Read KVM stats descriptors");
2067 * Read stat data for a particular stat
2070 * stats_fd - the file descriptor for the binary stats file from which to read
2071 * header - the binary stats metadata header corresponding to the given FD
2072 * desc - the binary stat metadata for the particular stat to be read
2073 * max_elements - the maximum number of 8-byte values to read into data
2076 * data - the buffer into which stat data should be read
2078 * Read the data values of a specified stat from the binary stats interface.
2080 void read_stat_data(int stats_fd, struct kvm_stats_header *header,
2081 struct kvm_stats_desc *desc, uint64_t *data,
2082 size_t max_elements)
2084 size_t nr_elements = min_t(ssize_t, desc->size, max_elements);
2085 size_t size = nr_elements * sizeof(*data);
2088 TEST_ASSERT(desc->size, "No elements in stat '%s'", desc->name);
2089 TEST_ASSERT(max_elements, "Zero elements requested for stat '%s'", desc->name);
2091 ret = pread(stats_fd, data, size,
2092 header->data_offset + desc->offset);
2094 TEST_ASSERT(ret >= 0, "pread() failed on stat '%s', errno: %i (%s)",
2095 desc->name, errno, strerror(errno));
2096 TEST_ASSERT(ret == size,
2097 "pread() on stat '%s' read %ld bytes, wanted %lu bytes",
2098 desc->name, ret, size);
2102 * Read the data of the named stat
2105 * vm - the VM for which the stat should be read
2106 * stat_name - the name of the stat to read
2107 * max_elements - the maximum number of 8-byte values to read into data
2110 * data - the buffer into which stat data should be read
2112 * Read the data values of a specified stat from the binary stats interface.
2114 void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
2115 size_t max_elements)
2117 struct kvm_stats_desc *desc;
2121 if (!vm->stats_fd) {
2122 vm->stats_fd = vm_get_stats_fd(vm);
2123 read_stats_header(vm->stats_fd, &vm->stats_header);
2124 vm->stats_desc = read_stats_descriptors(vm->stats_fd,
2128 size_desc = get_stats_descriptor_size(&vm->stats_header);
2130 for (i = 0; i < vm->stats_header.num_desc; ++i) {
2131 desc = (void *)vm->stats_desc + (i * size_desc);
2133 if (strcmp(desc->name, stat_name))
2136 read_stat_data(vm->stats_fd, &vm->stats_header, desc,
2137 data, max_elements);
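/*
 * Illustrative sketch: most tests read a single value via the vm_get_stat()
 * wrapper in the headers rather than calling this directly; the stat name is
 * an example and availability depends on the kernel and architecture.
 *
 *	uint64_t pages_4k = vm_get_stat(vm, "pages_4k");
 */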
2143 __weak void kvm_arch_vm_post_create(struct kvm_vm *vm)
2147 __weak void kvm_selftest_arch_init(void)
2151 void __attribute((constructor)) kvm_selftest_init(void)
2153 /* Tell stdout not to buffer its content. */
2154 setbuf(stdout, NULL);
2156 kvm_selftest_arch_init();