// SPDX-License-Identifier: GPL-2.0
/*
 * steal/stolen time test
 *
 * Copyright (C) 2020, Red Hat, Inc.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>
#include <sched.h>
#include <pthread.h>
#include <linux/kernel.h>
#include <asm/kvm.h>
#include <asm/kvm_para.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#define NR_VCPUS		4
#define ST_GPA_BASE		(1 << 30)
#define MIN_RUN_DELAY_NS	200000000ULL

static void *st_gva[NR_VCPUS];
static uint64_t guest_stolen_time[NR_VCPUS];
#if defined(__x86_64__)

/* steal_time must have 64-byte alignment */
#define STEAL_TIME_SIZE		((sizeof(struct kvm_steal_time) + 63) & ~63)
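
/*
 * A note on the macro above: "(size + 63) & ~63" is the usual align-up
 * idiom. Adding 63 carries any partial cache line over the next 64-byte
 * boundary and "& ~63" clears the low six bits, e.g. a 60-byte struct
 * rounds to 64 and a 65-byte struct to 128, so each vCPU's record sits
 * in its own cache-line-aligned slot.
 */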
static void check_status(struct kvm_steal_time *st)
{
	GUEST_ASSERT(!(READ_ONCE(st->version) & 1));
	GUEST_ASSERT(READ_ONCE(st->flags) == 0);
	GUEST_ASSERT(READ_ONCE(st->preempted) == 0);
}
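
/*
 * Background for the version check: per Documentation/virt/kvm/msr.rst,
 * st->version works like a seqcount. KVM bumps it to an odd value before
 * updating the record and to an even value afterwards, so an odd version
 * means the guest raced with an update in progress.
 */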
static void guest_code(int cpu)
{
	struct kvm_steal_time *st = st_gva[cpu];
	uint32_t version;

	GUEST_ASSERT(rdmsr(MSR_KVM_STEAL_TIME) == ((uint64_t)st_gva[cpu] | KVM_MSR_ENABLED));

	memset(st, 0, sizeof(*st));
	GUEST_SYNC(0);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	version = READ_ONCE(st->version);
	check_status(st);
	GUEST_SYNC(1);

	check_status(st);
	GUEST_ASSERT(version < READ_ONCE(st->version));
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	check_status(st);
	GUEST_DONE();
}
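
/*
 * The GUEST_SYNC(0)/GUEST_SYNC(1)/GUEST_DONE() checkpoints above pair up
 * with the three run_vcpu() calls the host makes per vCPU in main(): one
 * run to let KVM initialize the record, one to sample steal time before
 * any forced preemption, and one after the stealing thread has run.
 */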
static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has(X86_FEATURE_KVM_STEAL_TIME);
}
static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
{
	int ret;

	/* ST_GPA_BASE is identity mapped */
	st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
	sync_global_to_guest(vcpu->vm, st_gva[i]);

	/* A write with reserved bits set must fail. */
	ret = _vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME,
			    (ulong)st_gva[i] | KVM_STEAL_RESERVED_MASK);
	TEST_ASSERT(ret == 0, "Bad GPA didn't fail");

	vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED);
}
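
/*
 * MSR value layout, from asm/kvm_para.h: bit 0 is KVM_MSR_ENABLED and
 * bits 1-5 (KVM_STEAL_RESERVED_MASK) are reserved, which is what forces
 * the 64-byte alignment of the steal-time area; the remaining bits carry
 * the guest physical address.
 */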
static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
{
	struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);
	int i;

	pr_info("VCPU%d:\n", vcpu_idx);
	pr_info("    steal:     %lld\n", st->steal);
	pr_info("    version:   %d\n", st->version);
	pr_info("    flags:     %d\n", st->flags);
	pr_info("    preempted: %d\n", st->preempted);
	pr_info("    u8_pad:    ");
	for (i = 0; i < 3; ++i)
		pr_info("%d", st->u8_pad[i]);
	pr_info("\n    pad:       ");
	for (i = 0; i < 11; ++i)
		pr_info("%d", st->pad[i]);
	pr_info("\n");
}
#elif defined(__aarch64__)

/* PV_TIME_ST must have 64-byte alignment */
#define STEAL_TIME_SIZE		((sizeof(struct st_time) + 63) & ~63)

#define SMCCC_ARCH_FEATURES	0x80000001
#define PV_TIME_FEATURES	0xc5000020
#define PV_TIME_ST		0xc5000021

struct st_time {
	uint32_t rev;
	uint32_t attr;
	uint64_t st_time;
};
static int64_t smccc(uint32_t func, uint64_t arg)
{
	struct arm_smccc_res res;

	smccc_hvc(func, arg, 0, 0, 0, 0, 0, 0, &res);
	return res.a0;
}
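
/*
 * Per the SMCCC spec, the result of a call comes back in x0 (res.a0
 * here), with SMCCC_RET_NOT_SUPPORTED defined as -1. The guest below
 * uses that to probe the hypervisor: SMCCC_ARCH_FEATURES advertises
 * PV_TIME_FEATURES, PV_TIME_FEATURES advertises itself and PV_TIME_ST,
 * and PV_TIME_ST then returns the IPA of the vCPU's stolen-time record.
 */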
static void check_status(struct st_time *st)
{
	GUEST_ASSERT(READ_ONCE(st->rev) == 0);
	GUEST_ASSERT(READ_ONCE(st->attr) == 0);
}
static void guest_code(int cpu)
{
	struct st_time *st;
	int64_t status;

	status = smccc(SMCCC_ARCH_FEATURES, PV_TIME_FEATURES);
	GUEST_ASSERT(status == 0);
	status = smccc(PV_TIME_FEATURES, PV_TIME_FEATURES);
	GUEST_ASSERT(status == 0);
	status = smccc(PV_TIME_FEATURES, PV_TIME_ST);
	GUEST_ASSERT(status == 0);

	status = smccc(PV_TIME_ST, 0);
	GUEST_ASSERT(status != -1);
	GUEST_ASSERT(status == (ulong)st_gva[cpu]);

	st = (struct st_time *)status;
	GUEST_SYNC(0);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
	GUEST_SYNC(1);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
	GUEST_DONE();
}
static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
	struct kvm_device_attr dev = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr = KVM_ARM_VCPU_PVTIME_IPA,
	};

	return !__vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);
}
static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
{
	struct kvm_vm *vm = vcpu->vm;
	uint64_t st_ipa;
	int ret;

	struct kvm_device_attr dev = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr = KVM_ARM_VCPU_PVTIME_IPA,
		.addr = (uint64_t)&st_ipa,
	};

	vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);

	/* ST_GPA_BASE is identity mapped */
	st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
	sync_global_to_guest(vm, st_gva[i]);

	/* An unaligned IPA must be rejected. */
	st_ipa = (ulong)st_gva[i] | 1;
	ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
	TEST_ASSERT(ret == -1 && errno == EINVAL, "Bad IPA didn't report EINVAL");

	st_ipa = (ulong)st_gva[i];
	vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);

	/* Setting the IPA a second time must fail. */
	ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
	TEST_ASSERT(ret == -1 && errno == EEXIST, "Set IPA twice without EEXIST");
}
static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
{
	struct st_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);

	pr_info("VCPU%d:\n", vcpu_idx);
	pr_info("    rev:     %d\n", st->rev);
	pr_info("    attr:    %d\n", st->attr);
	pr_info("    st_time: %ld\n", st->st_time);
}

#endif
static void *do_steal_time(void *arg)
{
	struct timespec ts, stop;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	stop = timespec_add_ns(ts, MIN_RUN_DELAY_NS);

	while (1) {
		clock_gettime(CLOCK_MONOTONIC, &ts);
		if (timespec_to_ns(timespec_sub(ts, stop)) >= 0)
			break;
	}

	return NULL;
}
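
/*
 * do_steal_time() is the "thief": main() pins it to the same physical
 * CPU as the vCPUs, so this busy-wait of at least MIN_RUN_DELAY_NS
 * keeps the vCPU off the runqueue and is what the host later expects
 * to show up as stolen time inside the guest.
 */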
static void run_vcpu(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	vcpu_run(vcpu);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
	case UCALL_DONE:
		break;
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
	default:
		TEST_ASSERT(false, "Unexpected exit: %s",
			    exit_reason_str(vcpu->run->exit_reason));
	}
}
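
/*
 * get_ucall() decodes the guest's GUEST_SYNC/GUEST_DONE/GUEST_ASSERT
 * checkpoints; any other exit (e.g. an unexpected MMIO or shutdown)
 * fails the test with the raw exit reason.
 */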
int main(int ac, char **av)
{
	struct kvm_vcpu *vcpus[NR_VCPUS];
	struct kvm_vm *vm;
	pthread_attr_t attr;
	pthread_t thread;
	cpu_set_t cpuset;
	unsigned int gpages;
	long stolen_time;
	long run_delay;
	bool verbose;
	int i;

	verbose = ac > 1 && (!strncmp(av[1], "-v", 3) || !strncmp(av[1], "--verbose", 10));

	/* Set CPU affinity so we can force preemption of the VCPU */
	CPU_ZERO(&cpuset);
	CPU_SET(0, &cpuset);
	pthread_attr_init(&attr);
	pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
	pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);

	/* Create a VM and an identity mapped memslot for the steal time structure */
	vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus);
	gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE * NR_VCPUS);
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
	virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages);
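
	/*
	 * With the memslot and the identity map in place, a guest virtual
	 * address in [ST_GPA_BASE, ST_GPA_BASE + gpages * page_size) equals
	 * its GPA, which is why the st_gva[] pointers can be handed straight
	 * to the MSR / device attribute on either architecture.
	 */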
	TEST_REQUIRE(is_steal_time_supported(vcpus[0]));

	/* Run test on each VCPU */
	for (i = 0; i < NR_VCPUS; ++i) {
		steal_time_init(vcpus[i], i);

		vcpu_args_set(vcpus[i], 1, i);

		/* First VCPU run initializes steal-time */
		run_vcpu(vcpus[i]);

		/* Second VCPU run, expect guest stolen time to be <= run_delay */
		run_vcpu(vcpus[i]);
		sync_global_from_guest(vm, guest_stolen_time[i]);
		stolen_time = guest_stolen_time[i];
		run_delay = get_run_delay();
		TEST_ASSERT(stolen_time <= run_delay,
			    "Expected stolen time <= %ld, got %ld",
			    run_delay, stolen_time);
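
		/*
		 * The direction of that assert matters: get_run_delay()
		 * reads this task's run_delay from /proc/<pid>/schedstat,
		 * which has been accumulating since process start, while
		 * the guest's steal counter only started when steal-time
		 * was enabled, so stolen_time cannot exceed run_delay.
		 */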
		/*
		 * Steal time from the VCPU. The steal time thread has the
		 * same CPU affinity as the VCPUs.
		 */
		run_delay = get_run_delay();
		pthread_create(&thread, &attr, do_steal_time, NULL);
		do
			sched_yield();
		while (get_run_delay() - run_delay < MIN_RUN_DELAY_NS);
		pthread_join(thread, NULL);
		run_delay = get_run_delay() - run_delay;
		TEST_ASSERT(run_delay >= MIN_RUN_DELAY_NS,
			    "Expected run_delay >= %ld, got %ld",
			    MIN_RUN_DELAY_NS, run_delay);
		/* Run VCPU again to confirm stolen time is consistent with run_delay */
		run_vcpu(vcpus[i]);
		sync_global_from_guest(vm, guest_stolen_time[i]);
		stolen_time = guest_stolen_time[i] - stolen_time;
		TEST_ASSERT(stolen_time >= run_delay,
			    "Expected stolen time >= %ld, got %ld",
			    run_delay, stolen_time);
		if (verbose) {
			pr_info("VCPU%d: total-stolen-time=%ld test-stolen-time=%ld", i,
				guest_stolen_time[i], stolen_time);
			if (stolen_time == run_delay)
				pr_info(" (BONUS: guest test-stolen-time even exactly matches test-run_delay)");
			pr_info("\n");
			steal_time_dump(vm, i);
		}
	}

	return 0;
}