// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */
#include "x86.h"
#include "xen.h"
#include "hyperv.h"

#include <linux/eventfd.h>
#include <linux/kvm_host.h>
#include <linux/sched/stat.h>

#include <trace/events/kvm.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/event_channel.h>

#include "trace.h"
static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r);

DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);
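
/*
 * Set up (or tear down) the shared_info page: map it through the
 * gfn_to_pfn cache and publish the wall-clock time into it, much as
 * Xen itself does when the page is registered.
 */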
static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	struct pvclock_wall_clock *wc;
	gpa_t gpa = gfn_to_gpa(gfn);
	u32 *wc_sec_hi;
	u32 wc_version;
	u64 wall_nsec;
	int ret = 0;
	int idx = srcu_read_lock(&kvm->srcu);

	if (gfn == GPA_INVALID) {
		kvm_gfn_to_pfn_cache_destroy(kvm, gpc);
		goto out;
	}

	do {
		ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN,
						gpa, PAGE_SIZE);
		if (ret)
			goto out;

		/*
		 * This code mirrors kvm_write_wall_clock() except that it writes
		 * directly through the pfn cache and doesn't mark the page dirty.
		 */
		wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);

		/* It could be invalid again already, so we need to check */
		read_lock_irq(&gpc->lock);
		if (gpc->valid)
			break;
		read_unlock_irq(&gpc->lock);
	} while (1);

	/* Paranoia checks on the 32-bit struct layout */
	BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
	BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924);
	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);

#ifdef CONFIG_X86_64
	/* Paranoia checks on the 64-bit struct layout */
	BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
	BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c);

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;

		wc_sec_hi = &shinfo->wc_sec_hi;
		wc = &shinfo->wc;
	} else
#endif
	{
		struct compat_shared_info *shinfo = gpc->khva;

		wc_sec_hi = &shinfo->arch.wc_sec_hi;
		wc = &shinfo->wc;
	}

	/* Increment and ensure an odd value */
	wc_version = wc->version = (wc->version + 1) | 1;
	smp_wmb();

	wc->nsec = do_div(wall_nsec, 1000000000);
	wc->sec = (u32)wall_nsec;
	*wc_sec_hi = wall_nsec >> 32;
	smp_wmb();

	wc->version = wc_version + 1;
	read_unlock_irq(&gpc->lock);

	kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);

out:
	srcu_read_unlock(&kvm->srcu, idx);
	return ret;
}
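
/*
 * Account the time since the last state change to the old runstate and
 * switch the vCPU's in-kernel shadow runstate to @state. Time the task
 * spent waiting on the runqueue while it was supposed to be running is
 * credited to RUNSTATE_runnable as "stolen" time.
 */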
static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	u64 now = get_kvmclock_ns(v->kvm);
	u64 delta_ns = now - vx->runstate_entry_time;
	u64 run_delay = current->sched_info.run_delay;

	if (unlikely(!vx->runstate_entry_time))
		vx->current_runstate = RUNSTATE_offline;

	/*
	 * Time waiting for the scheduler isn't "stolen" if the
	 * vCPU wasn't running anyway.
	 */
	if (vx->current_runstate == RUNSTATE_running) {
		u64 steal_ns = run_delay - vx->last_steal;

		delta_ns -= steal_ns;

		vx->runstate_times[RUNSTATE_runnable] += steal_ns;
	}
	vx->last_steal = run_delay;

	vx->runstate_times[vx->current_runstate] += delta_ns;
	vx->current_runstate = state;
	vx->runstate_entry_time = now;
}
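
/*
 * As above, but also copy the updated runstate into the guest's
 * vcpu_runstate_info structure through the runstate pfn cache. The
 * XEN_RUNSTATE_UPDATE bit in state_entry_time is set for the duration
 * of the update so the guest can detect a torn read.
 */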
void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	struct gfn_to_pfn_cache *gpc = &vx->runstate_cache;
	uint64_t *user_times;
	unsigned long flags;
	size_t user_len;
	int *user_state;

	kvm_xen_update_runstate(v, state);

	if (!vx->runstate_cache.active)
		return;

	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode)
		user_len = sizeof(struct vcpu_runstate_info);
	else
		user_len = sizeof(struct compat_vcpu_runstate_info);

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
					   user_len)) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* When invoked from kvm_sched_out() we cannot sleep */
		if (state == RUNSTATE_runnable)
			return;

		if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, user_len))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}

	/*
	 * The only difference between 32-bit and 64-bit versions of the
	 * runstate struct is the alignment of uint64_t in 32-bit, which
	 * means that the 64-bit version has an additional 4 bytes of
	 * padding after the first field 'state'.
	 *
	 * So we use 'int __user *user_state' to point to the state field,
	 * and 'uint64_t __user *user_times' for runstate_entry_time. So
	 * the actual array of time[] in each state starts at user_times[1].
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
	BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
#ifdef CONFIG_X86_64
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) + 4);
#endif

	user_state = gpc->khva;

	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode)
		user_times = gpc->khva + offsetof(struct vcpu_runstate_info,
						  state_entry_time);
	else
		user_times = gpc->khva + offsetof(struct compat_vcpu_runstate_info,
						  state_entry_time);

	/*
	 * First write the updated state_entry_time at the appropriate
	 * location determined by 'offset'.
	 */
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) !=
		     sizeof(user_times[0]));
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
		     sizeof(user_times[0]));

	user_times[0] = vx->runstate_entry_time | XEN_RUNSTATE_UPDATE;
	smp_wmb();

	/*
	 * Next, write the new runstate. This is in the *same* place
	 * for 32-bit and 64-bit guests, asserted here for paranoia.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) !=
		     offsetof(struct compat_vcpu_runstate_info, state));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));

	*user_state = vx->current_runstate;

	/*
	 * Write the actual runstate times immediately after the
	 * runstate_entry_time.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct vcpu_runstate_info, time) - sizeof(u64));
	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) - sizeof(u64));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof_field(struct compat_vcpu_runstate_info, time));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof(vx->runstate_times));

	memcpy(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times));
	smp_wmb();

	/*
	 * Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
	 * runstate_entry_time field.
	 */
	user_times[0] &= ~XEN_RUNSTATE_UPDATE;
	smp_wmb();

	read_unlock_irqrestore(&gpc->lock, flags);

	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
}
/*
 * On event channel delivery, the vcpu_info may not have been accessible.
 * In that case, there are bits in vcpu->arch.xen.evtchn_pending_sel which
 * need to be marked into the vcpu_info (and evtchn_upcall_pending set).
 * Do so now that we can sleep in the context of the vCPU to bring the
 * page in, and refresh the pfn cache for it.
 */
void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
{
	unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel);
	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
	unsigned long flags;

	if (!evtchn_pending_sel)
		return;

	/*
	 * Yes, this is an open-coded loop. But that's just what put_user()
	 * does anyway. Page it in and retry the instruction. We're just a
	 * little more honest about it.
	 */
	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
					   sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
						 sizeof(struct vcpu_info)))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}

	/* Now gpc->khva is a valid kernel address for the vcpu_info */
	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
		struct vcpu_info *vi = gpc->khva;

		asm volatile(LOCK_PREFIX "orq %0, %1\n"
			     "notq %0\n"
			     LOCK_PREFIX "andq %0, %2\n"
			     : "=r" (evtchn_pending_sel),
			       "+m" (vi->evtchn_pending_sel),
			       "+m" (v->arch.xen.evtchn_pending_sel)
			     : "0" (evtchn_pending_sel));
		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
	} else {
		u32 evtchn_pending_sel32 = evtchn_pending_sel;
		struct compat_vcpu_info *vi = gpc->khva;

		asm volatile(LOCK_PREFIX "orl %0, %1\n"
			     "notl %0\n"
			     LOCK_PREFIX "andl %0, %2\n"
			     : "=r" (evtchn_pending_sel32),
			       "+m" (vi->evtchn_pending_sel),
			       "+m" (v->arch.xen.evtchn_pending_sel)
			     : "0" (evtchn_pending_sel32));
		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
	}
	read_unlock_irqrestore(&gpc->lock, flags);

	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
}
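
/*
 * Check whether an upcall is pending for this vCPU, i.e. whether
 * evtchn_upcall_pending is set in its vcpu_info. May be called from
 * atomic context, so it must not sleep.
 */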
int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
{
	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
	unsigned long flags;
	u8 rc = 0;

	/*
	 * If the global upcall vector (HVMIRQ_callback_vector) is set and
	 * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending.
	 */

	/* No need for compat handling here */
	BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
		     offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof_field(struct vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
					   sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/*
		 * This function gets called from kvm_vcpu_block() after setting the
		 * task to TASK_INTERRUPTIBLE, to see if it needs to wake immediately
		 * from a HLT. So we really mustn't sleep. If the page ended up absent
		 * at that point, just return 1 in order to trigger an immediate wake,
		 * and we'll end up getting called again from a context where we *can*
		 * fault in the page and wait for it.
		 */
		if (in_atomic() || !task_is_running(current))
			return 1;

		if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
						 sizeof(struct vcpu_info))) {
			/*
			 * If this failed, userspace has screwed up the
			 * vcpu_info mapping. No interrupts for you.
			 */
			return 0;
		}
		read_lock_irqsave(&gpc->lock, flags);
	}

	rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending;
	read_unlock_irqrestore(&gpc->lock, flags);
	return rc;
}
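
/* Handler for the KVM_XEN_HVM_SET_ATTR ioctl */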
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
			r = -EINVAL;
		} else {
			mutex_lock(&kvm->lock);
			kvm->arch.xen.long_mode = !!data->u.long_mode;
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;
	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		mutex_lock(&kvm->lock);
		r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
		mutex_unlock(&kvm->lock);
		break;
	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		if (data->u.vector && data->u.vector < 0x10)
			r = -EINVAL;
		else {
			mutex_lock(&kvm->lock);
			kvm->arch.xen.upcall_vector = data->u.vector;
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;
	case KVM_XEN_ATTR_TYPE_EVTCHN:
		r = kvm_xen_setattr_evtchn(kvm, data);
		break;
	default:
		break;
	}

	return r;
}
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&kvm->lock);
	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		data->u.long_mode = kvm->arch.xen.long_mode;
		r = 0;
		break;
	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		if (kvm->arch.xen.shinfo_cache.active)
			data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
		else
			data->u.shared_info.gfn = GPA_INVALID;
		r = 0;
		break;
	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		data->u.vector = kvm->arch.xen.upcall_vector;
		r = 0;
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->lock);
	return r;
}
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int idx, r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);
	idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		/* No compat necessary here. */
		BUILD_BUG_ON(sizeof(struct vcpu_info) !=
			     sizeof(struct compat_vcpu_info));
		BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
			     offsetof(struct compat_vcpu_info, time));

		if (data->u.gpa == GPA_INVALID) {
			kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
			r = 0;
			break;
		}

		r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
					      &vcpu->arch.xen.vcpu_info_cache,
					      NULL, KVM_HOST_USES_PFN, data->u.gpa,
					      sizeof(struct vcpu_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (data->u.gpa == GPA_INVALID) {
			kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
						     &vcpu->arch.xen.vcpu_time_info_cache);
			r = 0;
			break;
		}

		r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
					      &vcpu->arch.xen.vcpu_time_info_cache,
					      NULL, KVM_HOST_USES_PFN, data->u.gpa,
					      sizeof(struct pvclock_vcpu_time_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.gpa == GPA_INVALID) {
			kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
						     &vcpu->arch.xen.runstate_cache);
			r = 0;
			break;
		}

		r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
					      &vcpu->arch.xen.runstate_cache,
					      NULL, KVM_HOST_USES_PFN, data->u.gpa,
					      sizeof(struct vcpu_runstate_info));
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline) {
			r = -EINVAL;
			break;
		}

		kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline) {
			r = -EINVAL;
			break;
		}
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {
			r = -EINVAL;
			break;
		}
		if (get_kvmclock_ns(vcpu->kvm) <
		    data->u.runstate.state_entry_time) {
			r = -EINVAL;
			break;
		}

		vcpu->arch.xen.current_runstate = data->u.runstate.state;
		vcpu->arch.xen.runstate_entry_time =
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] =
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] =
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] =
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] =
			data->u.runstate.time_offline;
		vcpu->arch.xen.last_steal = current->sched_info.run_delay;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline &&
		    data->u.runstate.state != (u64)-1) {
			r = -EINVAL;
			break;
		}
		/* The adjustment must add up */
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {
			r = -EINVAL;
			break;
		}

		if (get_kvmclock_ns(vcpu->kvm) <
		    (vcpu->arch.xen.runstate_entry_time +
		     data->u.runstate.state_entry_time)) {
			r = -EINVAL;
			break;
		}

		vcpu->arch.xen.runstate_entry_time +=
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] +=
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] +=
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] +=
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] +=
			data->u.runstate.time_offline;

		if (data->u.runstate.state <= RUNSTATE_offline)
			kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		r = 0;
		break;

	default:
		break;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	mutex_unlock(&vcpu->kvm->lock);
	return r;
}
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		if (vcpu->arch.xen.vcpu_info_cache.active)
			data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
		else
			data->u.gpa = GPA_INVALID;
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (vcpu->arch.xen.vcpu_time_info_cache.active)
			data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
		else
			data->u.gpa = GPA_INVALID;
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (vcpu->arch.xen.runstate_cache.active) {
			data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
			r = 0;
		}
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		data->u.runstate.state_entry_time =
			vcpu->arch.xen.runstate_entry_time;
		data->u.runstate.time_running =
			vcpu->arch.xen.runstate_times[RUNSTATE_running];
		data->u.runstate.time_runnable =
			vcpu->arch.xen.runstate_times[RUNSTATE_runnable];
		data->u.runstate.time_blocked =
			vcpu->arch.xen.runstate_times[RUNSTATE_blocked];
		data->u.runstate.time_offline =
			vcpu->arch.xen.runstate_times[RUNSTATE_offline];
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		r = -EINVAL;
		break;
	default:
		break;
	}

	mutex_unlock(&vcpu->kvm->lock);
	return r;
}
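
/*
 * Called when the guest writes the hypercall page MSR: populate the
 * requested guest page either with VMCALL/VMMCALL stubs (when KVM
 * intercepts the hypercalls itself) or from the blob provided by the
 * VMM via KVM_XEN_HVM_CONFIG.
 */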
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	bool lm = is_long_mode(vcpu);

	/* Latch long_mode for shared_info pages etc. */
	vcpu->kvm->arch.xen.long_mode = lm;

	/*
	 * If Xen hypercall intercept is enabled, fill the hypercall
	 * page with VMCALL/VMMCALL instructions since that's what
	 * we catch. Else the VMM has provided the hypercall pages
	 * with instructions of its own choosing, so use those.
	 */
	if (kvm_xen_hypercall_enabled(kvm)) {
		u8 instructions[32];
		int i;

		if (page_num)
			return 1;

		/* mov imm32, %eax */
		instructions[0] = 0xb8;

		/* vmcall / vmmcall */
		static_call(kvm_x86_patch_hypercall)(vcpu, instructions + 5);

		/* ret */
		instructions[8] = 0xc3;

		/* int3 to pad */
		memset(instructions + 9, 0xcc, sizeof(instructions) - 9);

		for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) {
			*(u32 *)&instructions[1] = i;
			if (kvm_vcpu_write_guest(vcpu,
						 page_addr + (i * sizeof(instructions)),
						 instructions, sizeof(instructions)))
				return 1;
		}
	} else {
		/*
		 * Note, truncation is a non-issue as 'lm' is guaranteed to be
		 * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes.
		 */
		hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
				     : kvm->arch.xen_hvm_config.blob_addr_32;
		u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
				  : kvm->arch.xen_hvm_config.blob_size_32;
		u8 *page;

		if (page_num >= blob_size)
			return 1;

		blob_addr += page_num * PAGE_SIZE;

		page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) {
			kfree(page);
			return 1;
		}
		kfree(page);
	}
	return 0;
}
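
/*
 * Handler for the KVM_XEN_HVM_CONFIG ioctl: latch the hypercall MSR and
 * (optionally) the hypercall page blobs, and flip the kvm_xen_enabled
 * static key when Xen support is first configured or torn down.
 *
 * A minimal sketch of the userspace side (assuming a vm_fd from
 * KVM_CREATE_VM and the conventional Xen MSR index 0x40000000) might be:
 *
 *	struct kvm_xen_hvm_config xhc = {
 *		.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
 *		.msr = 0x40000000,
 *	};
 *	ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &xhc);
 */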
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
{
	if (xhc->flags & ~KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL)
		return -EINVAL;

	/*
	 * With hypercall interception the kernel generates its own
	 * hypercall page so it must not be provided.
	 */
	if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) &&
	    (xhc->blob_addr_32 || xhc->blob_addr_64 ||
	     xhc->blob_size_32 || xhc->blob_size_64))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
		static_branch_inc(&kvm_xen_enabled.key);
	else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);

	memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));

	mutex_unlock(&kvm->lock);
	return 0;
}
static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	kvm_rax_write(vcpu, result);
	return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip)))
		return 1;

	return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
}
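
/*
 * Entry point for Xen hypercalls from the guest. Hypercalls which KVM
 * handles in the kernel (currently only EVTCHNOP_send to a registered
 * port) complete here; anything else exits to userspace with
 * KVM_EXIT_XEN so the VMM can emulate it.
 */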
int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
{
	bool longmode;
	u64 input, params[6], r = -ENOSYS;
	bool handled = false;

	input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);

	/* Hyper-V hypercalls get bit 31 set in EAX */
	if ((input & 0x80000000) &&
	    kvm_hv_hypercall_enabled(vcpu))
		return kvm_hv_hypercall(vcpu);

	longmode = is_64_bit_hypercall(vcpu);
	if (!longmode) {
		params[0] = (u32)kvm_rbx_read(vcpu);
		params[1] = (u32)kvm_rcx_read(vcpu);
		params[2] = (u32)kvm_rdx_read(vcpu);
		params[3] = (u32)kvm_rsi_read(vcpu);
		params[4] = (u32)kvm_rdi_read(vcpu);
		params[5] = (u32)kvm_rbp_read(vcpu);
	}
#ifdef CONFIG_X86_64
	else {
		params[0] = (u64)kvm_rdi_read(vcpu);
		params[1] = (u64)kvm_rsi_read(vcpu);
		params[2] = (u64)kvm_rdx_read(vcpu);
		params[3] = (u64)kvm_r10_read(vcpu);
		params[4] = (u64)kvm_r8_read(vcpu);
		params[5] = (u64)kvm_r9_read(vcpu);
	}
#endif
	trace_kvm_xen_hypercall(input, params[0], params[1], params[2],
				params[3], params[4], params[5]);

	switch (input) {
	case __HYPERVISOR_event_channel_op:
		if (params[0] == EVTCHNOP_send)
			handled = kvm_xen_hcall_evtchn_send(vcpu, params[1], &r);
		break;
	default:
		break;
	}

	if (handled)
		return kvm_xen_hypercall_set_result(vcpu, r);

	vcpu->run->exit_reason = KVM_EXIT_XEN;
	vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
	vcpu->run->xen.u.hcall.longmode = longmode;
	vcpu->run->xen.u.hcall.cpl = static_call(kvm_x86_get_cpl)(vcpu);
	vcpu->run->xen.u.hcall.input = input;
	vcpu->run->xen.u.hcall.params[0] = params[0];
	vcpu->run->xen.u.hcall.params[1] = params[1];
	vcpu->run->xen.u.hcall.params[2] = params[2];
	vcpu->run->xen.u.hcall.params[3] = params[3];
	vcpu->run->xen.u.hcall.params[4] = params[4];
	vcpu->run->xen.u.hcall.params[5] = params[5];
	vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu);
	vcpu->arch.complete_userspace_io =
		kvm_xen_hypercall_complete_userspace;

	return 0;
}
static inline int max_evtchn_port(struct kvm *kvm)
{
	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
		return EVTCHN_2L_NR_CHANNELS;
	else
		return COMPAT_EVTCHN_2L_NR_CHANNELS;
}
/*
 * The return value from this function is propagated to kvm_set_irq() API,
 * so it returns:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 *
 * It is also called directly from kvm_arch_set_irq_inatomic(), where the
 * only check on its return value is a comparison with -EWOULDBLOCK.
 */
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	struct kvm_vcpu *vcpu;
	unsigned long *pending_bits, *mask_bits;
	unsigned long flags;
	int port_word_bit;
	bool kick_vcpu = false;
	int vcpu_idx, idx, rc;

	vcpu_idx = READ_ONCE(xe->vcpu_idx);
	if (vcpu_idx >= 0)
		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
	else {
		vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);
		if (!vcpu)
			return -EINVAL;
		WRITE_ONCE(xe->vcpu_idx, kvm_vcpu_get_idx(vcpu));
	}

	if (!vcpu->arch.xen.vcpu_info_cache.active)
		return -EINVAL;

	if (xe->port >= max_evtchn_port(kvm))
		return -EINVAL;

	rc = -EWOULDBLOCK;

	idx = srcu_read_lock(&kvm->srcu);

	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
		goto out_rcu;

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
		port_word_bit = xe->port / 64;
	} else {
		struct compat_shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
		port_word_bit = xe->port / 32;
	}

	/*
	 * If this port wasn't already set, and if it isn't masked, then
	 * we try to set the corresponding bit in the in-kernel shadow of
	 * evtchn_pending_sel for the target vCPU. And if *that* wasn't
	 * already set, then we kick the vCPU in question to write to the
	 * *real* evtchn_pending_sel in its own guest vcpu_info struct.
	 */
	if (test_and_set_bit(xe->port, pending_bits)) {
		rc = 0; /* It was already raised */
	} else if (test_bit(xe->port, mask_bits)) {
		rc = -ENOTCONN; /* Masked */
	} else {
		rc = 1; /* Delivered to the bitmap in shared_info. */
		/* Now switch to the vCPU's vcpu_info to set the index and pending_sel */
		read_unlock_irqrestore(&gpc->lock, flags);
		gpc = &vcpu->arch.xen.vcpu_info_cache;

		read_lock_irqsave(&gpc->lock, flags);
		if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, sizeof(struct vcpu_info))) {
			/*
			 * Could not access the vcpu_info. Set the bit in-kernel
			 * and prod the vCPU to deliver it for itself.
			 */
			if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
				kick_vcpu = true;
			goto out_rcu;
		}

		if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
			struct vcpu_info *vcpu_info = gpc->khva;
			if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) {
				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
				kick_vcpu = true;
			}
		} else {
			struct compat_vcpu_info *vcpu_info = gpc->khva;
			if (!test_and_set_bit(port_word_bit,
					      (unsigned long *)&vcpu_info->evtchn_pending_sel)) {
				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
				kick_vcpu = true;
			}
		}
	}

 out_rcu:
	read_unlock_irqrestore(&gpc->lock, flags);
	srcu_read_unlock(&kvm->srcu, idx);

	if (kick_vcpu) {
		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	return rc;
}
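
/*
 * Slow path for event channel delivery: if the fast path fails with
 * -EWOULDBLOCK because the shared_info page was not mapped, fault it
 * back in (borrowing kvm->mm when called from the irqfd workqueue)
 * and retry.
 */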
static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{
	bool mm_borrowed = false;
	int rc;

	rc = kvm_xen_set_evtchn_fast(xe, kvm);
	if (rc != -EWOULDBLOCK)
		return rc;

	if (current->mm != kvm->mm) {
		/*
		 * If not on a thread which already belongs to this KVM,
		 * we'd better be in the irqfd workqueue.
		 */
		if (WARN_ON_ONCE(current->mm))
			return -EINVAL;

		kthread_use_mm(kvm->mm);
		mm_borrowed = true;
	}

	/*
	 * For the irqfd workqueue, using the main kvm->lock mutex is
	 * fine since this function is invoked from kvm_set_irq() with
	 * no other lock held, no srcu. In future if it will be called
	 * directly from a vCPU thread (e.g. on hypercall for an IPI)
	 * then it may need to switch to using a leaf-node mutex for
	 * serializing the shared_info mapping.
	 */
	mutex_lock(&kvm->lock);

	/*
	 * It is theoretically possible for the page to be unmapped
	 * and the MMU notifier to invalidate the shared_info before
	 * we even get to use it. In that case, this looks like an
	 * infinite loop. It was tempting to do it via the userspace
	 * HVA instead... but that just *hides* the fact that it's
	 * an infinite loop, because if a fault occurs and it waits
	 * for the page to come back, it can *still* immediately
	 * fault and have to wait again, repeatedly.
	 *
	 * Conversely, the page could also have been reinstated by
	 * another thread before we even obtain the mutex above, so
	 * check again *first* before remapping it.
	 */
	do {
		struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
		int idx;

		rc = kvm_xen_set_evtchn_fast(xe, kvm);
		if (rc != -EWOULDBLOCK)
			break;

		idx = srcu_read_lock(&kvm->srcu);
		rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE);
		srcu_read_unlock(&kvm->srcu, idx);
	} while (!rc);

	mutex_unlock(&kvm->lock);

	if (mm_borrowed)
		kthread_unuse_mm(kvm->mm);

	return rc;
}
/* This is the version called from kvm_set_irq() as the .set function */
static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
			 int irq_source_id, int level, bool line_status)
{
	if (!level)
		return -1;

	return kvm_xen_set_evtchn(&e->xen_evtchn, kvm);
}
/*
 * Set up an event channel interrupt from the KVM IRQ routing table.
 * Used for e.g. PIRQ from passed through physical devices.
 */
int kvm_xen_setup_evtchn(struct kvm *kvm,
			 struct kvm_kernel_irq_routing_entry *e,
			 const struct kvm_irq_routing_entry *ue)
{
	struct kvm_vcpu *vcpu;

	if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm))
		return -EINVAL;

	/* We only support 2 level event channels for now */
	if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	/*
	 * Xen gives us interesting mappings from vCPU index to APIC ID,
	 * which means kvm_get_vcpu_by_id() has to iterate over all vCPUs
	 * to find it. Do that once at setup time, instead of every time.
	 * But beware that on live update / live migration, the routing
	 * table might be reinstated before the vCPU threads have finished
	 * recreating their vCPUs.
	 */
	vcpu = kvm_get_vcpu_by_id(kvm, ue->u.xen_evtchn.vcpu);
	if (vcpu)
		e->xen_evtchn.vcpu_idx = kvm_vcpu_get_idx(vcpu);
	else
		e->xen_evtchn.vcpu_idx = -1;

	e->xen_evtchn.port = ue->u.xen_evtchn.port;
	e->xen_evtchn.vcpu_id = ue->u.xen_evtchn.vcpu;
	e->xen_evtchn.priority = ue->u.xen_evtchn.priority;
	e->set = evtchn_set_fn;

	return 0;
}
/*
 * Explicit event sending from userspace with KVM_XEN_HVM_EVTCHN_SEND ioctl.
 */
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *uxe)
{
	struct kvm_xen_evtchn e;
	int ret;

	if (!uxe->port || uxe->port >= max_evtchn_port(kvm))
		return -EINVAL;

	/* We only support 2 level event channels for now */
	if (uxe->priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	e.port = uxe->port;
	e.vcpu_id = uxe->vcpu;
	e.vcpu_idx = -1;
	e.priority = uxe->priority;

	ret = kvm_xen_set_evtchn(&e, kvm);

	/*
	 * None of that 'return 1 if it actually got delivered' nonsense.
	 * We don't care if it was masked (-ENOTCONN) either.
	 */
	if (ret > 0 || ret == -ENOTCONN)
		ret = 0;

	return ret;
}
/*
 * Support for *outbound* event channel events via the EVTCHNOP_send hypercall.
 */
struct evtchnfd {
	u32 send_port;
	u32 type;
	union {
		struct kvm_xen_evtchn port;
		struct {
			u32 port; /* zero */
			struct eventfd_ctx *ctx;
		} eventfd;
	} deliver;
};
/*
 * Update target vCPU or priority for a registered sending channel.
 */
static int kvm_xen_eventfd_update(struct kvm *kvm,
				  struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;
	struct evtchnfd *evtchnfd;

	if (!port || port >= max_evtchn_port(kvm))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);
	mutex_unlock(&kvm->lock);

	if (!evtchnfd)
		return -ENOENT;

	/* For an UPDATE, nothing may change except the priority/vcpu */
	if (evtchnfd->type != data->u.evtchn.type)
		return -EINVAL;

	/*
	 * Port cannot change, and if it's zero that was an eventfd
	 * which can't be changed either.
	 */
	if (!evtchnfd->deliver.port.port ||
	    evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port)
		return -EINVAL;

	/* We only support 2 level event channels for now */
	if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
	if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) {
		evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
		evtchnfd->deliver.port.vcpu_idx = -1;
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
/*
 * Configure the target (eventfd or local port delivery) for sending on
 * a given event channel.
 */
static int kvm_xen_eventfd_assign(struct kvm *kvm,
				  struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;
	struct eventfd_ctx *eventfd = NULL;
	struct evtchnfd *evtchnfd = NULL;
	int ret = -EINVAL;

	if (!port || port >= max_evtchn_port(kvm))
		return -EINVAL;

	evtchnfd = kzalloc(sizeof(struct evtchnfd), GFP_KERNEL);
	if (!evtchnfd)
		return -ENOMEM;

	switch (data->u.evtchn.type) {
	case EVTCHNSTAT_ipi:
		/* IPI must map back to the same port# */
		if (data->u.evtchn.deliver.port.port != data->u.evtchn.send_port)
			goto out; /* -EINVAL */
		break;

	case EVTCHNSTAT_interdomain:
		if (data->u.evtchn.deliver.port.port) {
			if (data->u.evtchn.deliver.port.port >= max_evtchn_port(kvm))
				goto out; /* -EINVAL */
		} else {
			eventfd = eventfd_ctx_fdget(data->u.evtchn.deliver.eventfd.fd);
			if (IS_ERR(eventfd)) {
				ret = PTR_ERR(eventfd);
				goto out;
			}
		}
		break;

	case EVTCHNSTAT_virq:
	case EVTCHNSTAT_closed:
	case EVTCHNSTAT_unbound:
	case EVTCHNSTAT_pirq:
	default: /* Unknown event channel type */
		goto out; /* -EINVAL */
	}

	evtchnfd->send_port = data->u.evtchn.send_port;
	evtchnfd->type = data->u.evtchn.type;
	if (eventfd) {
		evtchnfd->deliver.eventfd.ctx = eventfd;
	} else {
		/* We only support 2 level event channels for now */
		if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
			goto out; /* -EINVAL */

		evtchnfd->deliver.port.port = data->u.evtchn.deliver.port.port;
		evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
		evtchnfd->deliver.port.vcpu_idx = -1;
		evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
	}

	mutex_lock(&kvm->lock);
	ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1,
			GFP_KERNEL);
	mutex_unlock(&kvm->lock);
	if (ret >= 0)
		return 0;

	if (ret == -ENOSPC)
		ret = -EEXIST;
out:
	if (eventfd)
		eventfd_ctx_put(eventfd);
	kfree(evtchnfd);
	return ret;
}
static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
{
	struct evtchnfd *evtchnfd;

	mutex_lock(&kvm->lock);
	evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port);
	mutex_unlock(&kvm->lock);

	if (!evtchnfd)
		return -ENOENT;

	if (kvm)
		synchronize_srcu(&kvm->srcu);
	if (!evtchnfd->deliver.port.port)
		eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
	kfree(evtchnfd);
	return 0;
}
static int kvm_xen_eventfd_reset(struct kvm *kvm)
{
	struct evtchnfd *evtchnfd;
	int i;

	mutex_lock(&kvm->lock);
	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
		idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
		synchronize_srcu(&kvm->srcu);
		if (!evtchnfd->deliver.port.port)
			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
		kfree(evtchnfd);
	}
	mutex_unlock(&kvm->lock);

	return 0;
}
static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;

	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_RESET)
		return kvm_xen_eventfd_reset(kvm);

	if (!port || port >= max_evtchn_port(kvm))
		return -EINVAL;

	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_DEASSIGN)
		return kvm_xen_eventfd_deassign(kvm, port);
	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_UPDATE)
		return kvm_xen_eventfd_update(kvm, data);
	if (data->u.evtchn.flags)
		return -EINVAL;

	return kvm_xen_eventfd_assign(kvm, data);
}
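
/*
 * In-kernel handling of the EVTCHNOP_send hypercall: look the port up
 * in the evtchn_ports idr and either deliver the event channel directly
 * or signal the associated eventfd. Returns false to punt the hypercall
 * to userspace if the port is not registered.
 */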
static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
{
	struct evtchnfd *evtchnfd;
	struct evtchn_send send;
	gpa_t gpa;
	int idx;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &send, sizeof(send))) {
		*r = -EFAULT;
		return true;
	}

	/* The evtchn_ports idr is protected by vcpu->kvm->srcu */
	evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port);
	if (!evtchnfd)
		return false;

	if (evtchnfd->deliver.port.port) {
		int ret = kvm_xen_set_evtchn(&evtchnfd->deliver.port, vcpu->kvm);
		if (ret < 0 && ret != -ENOTCONN)
			return false;
	} else {
		eventfd_signal(evtchnfd->deliver.eventfd.ctx, 1);
	}

	*r = 0;
	return true;
}
void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
				     &vcpu->arch.xen.runstate_cache);
	kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
				     &vcpu->arch.xen.vcpu_info_cache);
	kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
				     &vcpu->arch.xen.vcpu_time_info_cache);
}
void kvm_xen_init_vm(struct kvm *kvm)
{
	idr_init(&kvm->arch.xen.evtchn_ports);
}
void kvm_xen_destroy_vm(struct kvm *kvm)
{
	struct evtchnfd *evtchnfd;
	int i;

	kvm_gfn_to_pfn_cache_destroy(kvm, &kvm->arch.xen.shinfo_cache);

	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
		if (!evtchnfd->deliver.port.port)
			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
		kfree(evtchnfd);
	}
	idr_destroy(&kvm->arch.xen.evtchn_ports);

	if (kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);
}