arch/x86/kvm/xen.c (as of "KVM: x86/xen: intercept EVTCHNOP_send from guests")
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#include "x86.h"
#include "xen.h"
#include "lapic.h"
#include "hyperv.h"

#include <linux/eventfd.h>
#include <linux/kvm_host.h>
#include <linux/sched/stat.h>

#include <trace/events/kvm.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/event_channel.h>

#include "trace.h"

static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r);

DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);

static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
{
        struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
        struct pvclock_wall_clock *wc;
        gpa_t gpa = gfn_to_gpa(gfn);
        u32 *wc_sec_hi;
        u32 wc_version;
        u64 wall_nsec;
        int ret = 0;
        int idx = srcu_read_lock(&kvm->srcu);

        if (gfn == GPA_INVALID) {
                kvm_gfn_to_pfn_cache_destroy(kvm, gpc);
                goto out;
        }

        do {
                ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN,
                                                gpa, PAGE_SIZE);
                if (ret)
                        goto out;

                /*
                 * This code mirrors kvm_write_wall_clock() except that it writes
                 * directly through the pfn cache and doesn't mark the page dirty.
                 */
                wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);

                /* It could be invalid again already, so we need to check */
                read_lock_irq(&gpc->lock);

                if (gpc->valid)
                        break;

                read_unlock_irq(&gpc->lock);
        } while (1);

        /* Paranoia checks on the 32-bit struct layout */
        BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
        BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924);
        BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);

#ifdef CONFIG_X86_64
        /* Paranoia checks on the 64-bit struct layout */
        BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
        BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c);

        if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
                struct shared_info *shinfo = gpc->khva;

                wc_sec_hi = &shinfo->wc_sec_hi;
                wc = &shinfo->wc;
        } else
#endif
        {
                struct compat_shared_info *shinfo = gpc->khva;

                wc_sec_hi = &shinfo->arch.wc_sec_hi;
                wc = &shinfo->wc;
        }

        /* Increment and ensure an odd value */
        wc_version = wc->version = (wc->version + 1) | 1;
        smp_wmb();

        wc->nsec = do_div(wall_nsec, 1000000000);
        wc->sec = (u32)wall_nsec;
        *wc_sec_hi = wall_nsec >> 32;
        smp_wmb();

        wc->version = wc_version + 1;
        read_unlock_irq(&gpc->lock);

        kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);

out:
        srcu_read_unlock(&kvm->srcu, idx);
        return ret;
}
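
/*
 * Illustrative sketch (not part of this file): the odd/even version
 * protocol written above is the usual Xen wallclock seqlock, which a
 * guest consumes roughly like this:
 *
 *	do {
 *		version = wc->version;
 *		smp_rmb();
 *		sec = wc->sec;
 *		nsec = wc->nsec;
 *		smp_rmb();
 *	} while ((version & 1) || version != wc->version);
 */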

static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
{
        struct kvm_vcpu_xen *vx = &v->arch.xen;
        u64 now = get_kvmclock_ns(v->kvm);
        u64 delta_ns = now - vx->runstate_entry_time;
        u64 run_delay = current->sched_info.run_delay;

        if (unlikely(!vx->runstate_entry_time))
                vx->current_runstate = RUNSTATE_offline;

        /*
         * Time waiting for the scheduler isn't "stolen" if the
         * vCPU wasn't running anyway.
         */
        if (vx->current_runstate == RUNSTATE_running) {
                u64 steal_ns = run_delay - vx->last_steal;

                delta_ns -= steal_ns;

                vx->runstate_times[RUNSTATE_runnable] += steal_ns;
        }
        vx->last_steal = run_delay;

        vx->runstate_times[vx->current_runstate] += delta_ns;
        vx->current_runstate = state;
        vx->runstate_entry_time = now;
}
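
/*
 * Worked example of the accounting above (hypothetical numbers): if
 * 10ms of kvmclock time elapsed since runstate_entry_time while the
 * vCPU was RUNSTATE_running, and the scheduler's run_delay grew by
 * 2ms in that window, then 8ms is credited to RUNSTATE_running and
 * 2ms to RUNSTATE_runnable (steal time).
 */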

void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
{
        struct kvm_vcpu_xen *vx = &v->arch.xen;
        struct gfn_to_pfn_cache *gpc = &vx->runstate_cache;
        uint64_t *user_times;
        unsigned long flags;
        size_t user_len;
        int *user_state;

        kvm_xen_update_runstate(v, state);

        if (!vx->runstate_cache.active)
                return;

        if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode)
                user_len = sizeof(struct vcpu_runstate_info);
        else
                user_len = sizeof(struct compat_vcpu_runstate_info);

        read_lock_irqsave(&gpc->lock, flags);
        while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
                                           user_len)) {
                read_unlock_irqrestore(&gpc->lock, flags);

                /* When invoked from kvm_sched_out() we cannot sleep */
                if (state == RUNSTATE_runnable)
                        return;

                if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, user_len))
                        return;

                read_lock_irqsave(&gpc->lock, flags);
        }

        /*
         * The only difference between 32-bit and 64-bit versions of the
         * runstate struct is the alignment of uint64_t in 32-bit, which
         * means that the 64-bit version has an additional 4 bytes of
         * padding after the first field 'state'.
         *
         * So we use 'int __user *user_state' to point to the state field,
         * and 'uint64_t __user *user_times' for runstate_entry_time. So
         * the actual array of time[] in each state starts at user_times[1].
         */
        BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
        BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
        BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
#ifdef CONFIG_X86_64
        BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
                     offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
        BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
                     offsetof(struct compat_vcpu_runstate_info, time) + 4);
#endif

        user_state = gpc->khva;

        if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode)
                user_times = gpc->khva + offsetof(struct vcpu_runstate_info,
                                                  state_entry_time);
        else
                user_times = gpc->khva + offsetof(struct compat_vcpu_runstate_info,
                                                  state_entry_time);

        /*
         * First write the updated state_entry_time at the location
         * determined above.
         */
        BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) !=
                     sizeof(user_times[0]));
        BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
                     sizeof(user_times[0]));

        user_times[0] = vx->runstate_entry_time | XEN_RUNSTATE_UPDATE;
        smp_wmb();

        /*
         * Next, write the new runstate. This is in the *same* place
         * for 32-bit and 64-bit guests, asserted here for paranoia.
         */
        BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) !=
                     offsetof(struct compat_vcpu_runstate_info, state));
        BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state) !=
                     sizeof(vx->current_runstate));
        BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
                     sizeof(vx->current_runstate));

        *user_state = vx->current_runstate;

        /*
         * Write the actual runstate times immediately after the
         * runstate_entry_time.
         */
        BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
                     offsetof(struct vcpu_runstate_info, time) - sizeof(u64));
        BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) !=
                     offsetof(struct compat_vcpu_runstate_info, time) - sizeof(u64));
        BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
                     sizeof_field(struct compat_vcpu_runstate_info, time));
        BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
                     sizeof(vx->runstate_times));

        memcpy(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times));
        smp_wmb();

        /*
         * Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
         * runstate_entry_time field.
         */
        user_times[0] &= ~XEN_RUNSTATE_UPDATE;
        smp_wmb();

        read_unlock_irqrestore(&gpc->lock, flags);

        mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
}
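
/*
 * Illustrative guest-side counterpart (a sketch, not part of this
 * file): to get a consistent snapshot a guest re-reads until the
 * XEN_RUNSTATE_UPDATE bit is clear and state_entry_time is stable:
 *
 *	do {
 *		entry = READ_ONCE(rs->state_entry_time);
 *		smp_rmb();
 *		memcpy(times, rs->time, sizeof(times));
 *		smp_rmb();
 *	} while ((entry & XEN_RUNSTATE_UPDATE) ||
 *		 entry != READ_ONCE(rs->state_entry_time));
 */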

/*
 * On event channel delivery, the vcpu_info may not have been accessible.
 * In that case, there are bits in vcpu->arch.xen.evtchn_pending_sel which
 * need to be marked into the vcpu_info (and evtchn_upcall_pending set).
 * Do so now that we can sleep in the context of the vCPU to bring the
 * page in, and refresh the pfn cache for it.
 */
void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
{
        unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel);
        struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
        unsigned long flags;

        if (!evtchn_pending_sel)
                return;

        /*
         * Yes, this is an open-coded loop. But that's just what put_user()
         * does anyway. Page it in and retry the instruction. We're just a
         * little more honest about it.
         */
        read_lock_irqsave(&gpc->lock, flags);
        while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
                                           sizeof(struct vcpu_info))) {
                read_unlock_irqrestore(&gpc->lock, flags);

                if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
                                                 sizeof(struct vcpu_info)))
                        return;

                read_lock_irqsave(&gpc->lock, flags);
        }

        /* Now gpc->khva is a valid kernel address for the vcpu_info */
        if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
                struct vcpu_info *vi = gpc->khva;

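                /*
                 * Atomically OR our pending selector bits into the guest's
                 * vcpu_info and clear exactly those bits from the in-kernel
                 * shadow; the LOCK-prefixed RMW sequence below is the
                 * equivalent of:
                 *
                 *	vi->evtchn_pending_sel |= evtchn_pending_sel;
                 *	v->arch.xen.evtchn_pending_sel &= ~evtchn_pending_sel;
                 *
                 * done safely against concurrent setters of the shadow word.
                 */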
                asm volatile(LOCK_PREFIX "orq %0, %1\n"
                             "notq %0\n"
                             LOCK_PREFIX "andq %0, %2\n"
                             : "=r" (evtchn_pending_sel),
                               "+m" (vi->evtchn_pending_sel),
                               "+m" (v->arch.xen.evtchn_pending_sel)
                             : "0" (evtchn_pending_sel));
                WRITE_ONCE(vi->evtchn_upcall_pending, 1);
        } else {
                u32 evtchn_pending_sel32 = evtchn_pending_sel;
                struct compat_vcpu_info *vi = gpc->khva;

                asm volatile(LOCK_PREFIX "orl %0, %1\n"
                             "notl %0\n"
                             LOCK_PREFIX "andl %0, %2\n"
                             : "=r" (evtchn_pending_sel32),
                               "+m" (vi->evtchn_pending_sel),
                               "+m" (v->arch.xen.evtchn_pending_sel)
                             : "0" (evtchn_pending_sel32));
                WRITE_ONCE(vi->evtchn_upcall_pending, 1);
        }
        read_unlock_irqrestore(&gpc->lock, flags);

        mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
}

int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
{
        struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
        unsigned long flags;
        u8 rc = 0;

        /*
         * If the global upcall vector (HVMIRQ_callback_vector) is set and
         * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending.
         */

        /* No need for compat handling here */
        BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
                     offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
        BUILD_BUG_ON(sizeof(rc) !=
                     sizeof_field(struct vcpu_info, evtchn_upcall_pending));
        BUILD_BUG_ON(sizeof(rc) !=
                     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));

        read_lock_irqsave(&gpc->lock, flags);
        while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
                                           sizeof(struct vcpu_info))) {
                read_unlock_irqrestore(&gpc->lock, flags);

                /*
                 * This function gets called from kvm_vcpu_block() after setting the
                 * task to TASK_INTERRUPTIBLE, to see if it needs to wake immediately
                 * from a HLT. So we really mustn't sleep. If the page ended up absent
                 * at that point, just return 1 in order to trigger an immediate wake,
                 * and we'll end up getting called again from a context where we *can*
                 * fault in the page and wait for it.
                 */
                if (in_atomic() || !task_is_running(current))
                        return 1;

                if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
                                                 sizeof(struct vcpu_info))) {
                        /*
                         * If this failed, userspace has screwed up the
                         * vcpu_info mapping. No interrupts for you.
                         */
                        return 0;
                }
                read_lock_irqsave(&gpc->lock, flags);
        }

        rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending;
        read_unlock_irqrestore(&gpc->lock, flags);
        return rc;
}

int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
        int r = -ENOENT;

        switch (data->type) {
        case KVM_XEN_ATTR_TYPE_LONG_MODE:
                if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
                        r = -EINVAL;
                } else {
                        mutex_lock(&kvm->lock);
                        kvm->arch.xen.long_mode = !!data->u.long_mode;
                        mutex_unlock(&kvm->lock);
                        r = 0;
                }
                break;

        case KVM_XEN_ATTR_TYPE_SHARED_INFO:
                mutex_lock(&kvm->lock);
                r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
                mutex_unlock(&kvm->lock);
                break;

        case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
                if (data->u.vector && data->u.vector < 0x10)
                        r = -EINVAL;
                else {
                        mutex_lock(&kvm->lock);
                        kvm->arch.xen.upcall_vector = data->u.vector;
                        mutex_unlock(&kvm->lock);
                        r = 0;
                }
                break;

        case KVM_XEN_ATTR_TYPE_EVTCHN:
                r = kvm_xen_setattr_evtchn(kvm, data);
                break;

        default:
                break;
        }

        return r;
}

int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
        int r = -ENOENT;

        mutex_lock(&kvm->lock);

        switch (data->type) {
        case KVM_XEN_ATTR_TYPE_LONG_MODE:
                data->u.long_mode = kvm->arch.xen.long_mode;
                r = 0;
                break;

        case KVM_XEN_ATTR_TYPE_SHARED_INFO:
                if (kvm->arch.xen.shinfo_cache.active)
                        data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
                else
                        data->u.shared_info.gfn = GPA_INVALID;
                r = 0;
                break;

        case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
                data->u.vector = kvm->arch.xen.upcall_vector;
                r = 0;
                break;

        default:
                break;
        }

        mutex_unlock(&kvm->lock);
        return r;
}

int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
        int idx, r = -ENOENT;

        mutex_lock(&vcpu->kvm->lock);
        idx = srcu_read_lock(&vcpu->kvm->srcu);

        switch (data->type) {
        case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
                /* No compat necessary here. */
                BUILD_BUG_ON(sizeof(struct vcpu_info) !=
                             sizeof(struct compat_vcpu_info));
                BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
                             offsetof(struct compat_vcpu_info, time));

                if (data->u.gpa == GPA_INVALID) {
                        kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
                        r = 0;
                        break;
                }

                r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
                                              &vcpu->arch.xen.vcpu_info_cache,
                                              NULL, KVM_HOST_USES_PFN, data->u.gpa,
                                              sizeof(struct vcpu_info));
                if (!r)
                        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

                break;

        case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
                if (data->u.gpa == GPA_INVALID) {
                        kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
                                                     &vcpu->arch.xen.vcpu_time_info_cache);
                        r = 0;
                        break;
                }

                r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
                                              &vcpu->arch.xen.vcpu_time_info_cache,
                                              NULL, KVM_HOST_USES_PFN, data->u.gpa,
                                              sizeof(struct pvclock_vcpu_time_info));
                if (!r)
                        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
                break;

        case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
                if (!sched_info_on()) {
                        r = -EOPNOTSUPP;
                        break;
                }
                if (data->u.gpa == GPA_INVALID) {
                        kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
                                                     &vcpu->arch.xen.runstate_cache);
                        r = 0;
                        break;
                }

                r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
                                              &vcpu->arch.xen.runstate_cache,
                                              NULL, KVM_HOST_USES_PFN, data->u.gpa,
                                              sizeof(struct vcpu_runstate_info));
                break;

        case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
                if (!sched_info_on()) {
                        r = -EOPNOTSUPP;
                        break;
                }
                if (data->u.runstate.state > RUNSTATE_offline) {
                        r = -EINVAL;
                        break;
                }

                kvm_xen_update_runstate(vcpu, data->u.runstate.state);
                r = 0;
                break;

        case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
                if (!sched_info_on()) {
                        r = -EOPNOTSUPP;
                        break;
                }
                if (data->u.runstate.state > RUNSTATE_offline) {
                        r = -EINVAL;
                        break;
                }
                if (data->u.runstate.state_entry_time !=
                    (data->u.runstate.time_running +
                     data->u.runstate.time_runnable +
                     data->u.runstate.time_blocked +
                     data->u.runstate.time_offline)) {
                        r = -EINVAL;
                        break;
                }
                if (get_kvmclock_ns(vcpu->kvm) <
                    data->u.runstate.state_entry_time) {
                        r = -EINVAL;
                        break;
                }

                vcpu->arch.xen.current_runstate = data->u.runstate.state;
                vcpu->arch.xen.runstate_entry_time =
                        data->u.runstate.state_entry_time;
                vcpu->arch.xen.runstate_times[RUNSTATE_running] =
                        data->u.runstate.time_running;
                vcpu->arch.xen.runstate_times[RUNSTATE_runnable] =
                        data->u.runstate.time_runnable;
                vcpu->arch.xen.runstate_times[RUNSTATE_blocked] =
                        data->u.runstate.time_blocked;
                vcpu->arch.xen.runstate_times[RUNSTATE_offline] =
                        data->u.runstate.time_offline;
                vcpu->arch.xen.last_steal = current->sched_info.run_delay;
                r = 0;
                break;

        case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
                if (!sched_info_on()) {
                        r = -EOPNOTSUPP;
                        break;
                }
                if (data->u.runstate.state > RUNSTATE_offline &&
                    data->u.runstate.state != (u64)-1) {
                        r = -EINVAL;
                        break;
                }
                /* The adjustment must add up */
                if (data->u.runstate.state_entry_time !=
                    (data->u.runstate.time_running +
                     data->u.runstate.time_runnable +
                     data->u.runstate.time_blocked +
                     data->u.runstate.time_offline)) {
                        r = -EINVAL;
                        break;
                }

                if (get_kvmclock_ns(vcpu->kvm) <
                    (vcpu->arch.xen.runstate_entry_time +
                     data->u.runstate.state_entry_time)) {
                        r = -EINVAL;
                        break;
                }

                vcpu->arch.xen.runstate_entry_time +=
                        data->u.runstate.state_entry_time;
                vcpu->arch.xen.runstate_times[RUNSTATE_running] +=
                        data->u.runstate.time_running;
                vcpu->arch.xen.runstate_times[RUNSTATE_runnable] +=
                        data->u.runstate.time_runnable;
                vcpu->arch.xen.runstate_times[RUNSTATE_blocked] +=
                        data->u.runstate.time_blocked;
                vcpu->arch.xen.runstate_times[RUNSTATE_offline] +=
                        data->u.runstate.time_offline;

                if (data->u.runstate.state <= RUNSTATE_offline)
                        kvm_xen_update_runstate(vcpu, data->u.runstate.state);
                r = 0;
                break;

        default:
                break;
        }

        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        mutex_unlock(&vcpu->kvm->lock);
        return r;
}

int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
        int r = -ENOENT;

        mutex_lock(&vcpu->kvm->lock);

        switch (data->type) {
        case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
                if (vcpu->arch.xen.vcpu_info_cache.active)
                        data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
                else
                        data->u.gpa = GPA_INVALID;
                r = 0;
                break;

        case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
                if (vcpu->arch.xen.vcpu_time_info_cache.active)
                        data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
                else
                        data->u.gpa = GPA_INVALID;
                r = 0;
                break;

        case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
                if (!sched_info_on()) {
                        r = -EOPNOTSUPP;
                        break;
                }
                if (vcpu->arch.xen.runstate_cache.active) {
                        data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
                        r = 0;
                }
                break;

        case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
                if (!sched_info_on()) {
                        r = -EOPNOTSUPP;
                        break;
                }
                data->u.runstate.state = vcpu->arch.xen.current_runstate;
                r = 0;
                break;

        case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
                if (!sched_info_on()) {
                        r = -EOPNOTSUPP;
                        break;
                }
                data->u.runstate.state = vcpu->arch.xen.current_runstate;
                data->u.runstate.state_entry_time =
                        vcpu->arch.xen.runstate_entry_time;
                data->u.runstate.time_running =
                        vcpu->arch.xen.runstate_times[RUNSTATE_running];
                data->u.runstate.time_runnable =
                        vcpu->arch.xen.runstate_times[RUNSTATE_runnable];
                data->u.runstate.time_blocked =
                        vcpu->arch.xen.runstate_times[RUNSTATE_blocked];
                data->u.runstate.time_offline =
                        vcpu->arch.xen.runstate_times[RUNSTATE_offline];
                r = 0;
                break;

        case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
                r = -EINVAL;
                break;

        default:
                break;
        }

        mutex_unlock(&vcpu->kvm->lock);
        return r;
}

int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
        struct kvm *kvm = vcpu->kvm;
        u32 page_num = data & ~PAGE_MASK;
        u64 page_addr = data & PAGE_MASK;
        bool lm = is_long_mode(vcpu);

        /* Latch long_mode for shared_info pages etc. */
        vcpu->kvm->arch.xen.long_mode = lm;

        /*
         * If Xen hypercall intercept is enabled, fill the hypercall
         * page with VMCALL/VMMCALL instructions since that's what
         * we catch. Else the VMM has provided the hypercall pages
         * with instructions of its own choosing, so use those.
         */
        if (kvm_xen_hypercall_enabled(kvm)) {
                u8 instructions[32];
                int i;

                if (page_num)
                        return 1;

                /* mov imm32, %eax */
                instructions[0] = 0xb8;

                /* vmcall / vmmcall */
                static_call(kvm_x86_patch_hypercall)(vcpu, instructions + 5);

                /* ret */
                instructions[8] = 0xc3;

                /* int3 to pad */
                memset(instructions + 9, 0xcc, sizeof(instructions) - 9);

                for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) {
                        *(u32 *)&instructions[1] = i;
                        if (kvm_vcpu_write_guest(vcpu,
                                                 page_addr + (i * sizeof(instructions)),
                                                 instructions, sizeof(instructions)))
                                return 1;
                }
        } else {
                /*
                 * Note, truncation is a non-issue as 'lm' is guaranteed to be
                 * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes.
                 */
                hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
                                     : kvm->arch.xen_hvm_config.blob_addr_32;
                u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
                                  : kvm->arch.xen_hvm_config.blob_size_32;
                u8 *page;
                int ret;

                if (page_num >= blob_size)
                        return 1;

                blob_addr += page_num * PAGE_SIZE;

                page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
                if (IS_ERR(page))
                        return PTR_ERR(page);

                ret = kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE);
                kfree(page); /* free the copy whether or not the write succeeded */
                if (ret)
                        return 1;
        }
        return 0;
}
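
/*
 * Resulting page layout when interception is enabled (sketch): the
 * loop above fills the page with 32-byte stanzas, one per hypercall
 * number, each of the form
 *
 *	mov	$N, %eax
 *	vmcall			(or vmmcall on AMD)
 *	ret
 *	int3 padding ...
 *
 * so a guest makes hypercall N by calling hypercall_page + N * 32.
 */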

int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
{
        if (xhc->flags & ~KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL)
                return -EINVAL;

        /*
         * With hypercall interception the kernel generates its own
         * hypercall page so it must not be provided.
         */
        if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) &&
            (xhc->blob_addr_32 || xhc->blob_addr_64 ||
             xhc->blob_size_32 || xhc->blob_size_64))
                return -EINVAL;

        mutex_lock(&kvm->lock);

        if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
                static_branch_inc(&kvm_xen_enabled.key);
        else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
                static_branch_slow_dec_deferred(&kvm_xen_enabled);

        memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));

        mutex_unlock(&kvm->lock);
        return 0;
}
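
/*
 * Userspace sketch (an assumption, not part of this file): enabling
 * in-kernel hypercall interception, where 'msr' is whichever index the
 * VMM advertises to the guest for hypercall page setup:
 *
 *	struct kvm_xen_hvm_config xhc = {
 *		.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
 *		.msr = 0x40000200,	(hypothetical MSR index)
 *	};
 *	ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &xhc);
 */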

static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
        kvm_rax_write(vcpu, result);
        return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;

        if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip)))
                return 1;

        return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
}

int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
{
        bool longmode;
        u64 input, params[6], r = -ENOSYS;
        bool handled = false;

        input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);

        /* Hyper-V hypercalls get bit 31 set in EAX */
        if ((input & 0x80000000) &&
            kvm_hv_hypercall_enabled(vcpu))
                return kvm_hv_hypercall(vcpu);

        longmode = is_64_bit_hypercall(vcpu);
        if (!longmode) {
                params[0] = (u32)kvm_rbx_read(vcpu);
                params[1] = (u32)kvm_rcx_read(vcpu);
                params[2] = (u32)kvm_rdx_read(vcpu);
                params[3] = (u32)kvm_rsi_read(vcpu);
                params[4] = (u32)kvm_rdi_read(vcpu);
                params[5] = (u32)kvm_rbp_read(vcpu);
        }
#ifdef CONFIG_X86_64
        else {
                params[0] = (u64)kvm_rdi_read(vcpu);
                params[1] = (u64)kvm_rsi_read(vcpu);
                params[2] = (u64)kvm_rdx_read(vcpu);
                params[3] = (u64)kvm_r10_read(vcpu);
                params[4] = (u64)kvm_r8_read(vcpu);
                params[5] = (u64)kvm_r9_read(vcpu);
        }
#endif
        trace_kvm_xen_hypercall(input, params[0], params[1], params[2],
                                params[3], params[4], params[5]);

        switch (input) {
        case __HYPERVISOR_event_channel_op:
                if (params[0] == EVTCHNOP_send)
                        handled = kvm_xen_hcall_evtchn_send(vcpu, params[1], &r);
                break;

        default:
                break;
        }

        if (handled)
                return kvm_xen_hypercall_set_result(vcpu, r);

        vcpu->run->exit_reason = KVM_EXIT_XEN;
        vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
        vcpu->run->xen.u.hcall.longmode = longmode;
        vcpu->run->xen.u.hcall.cpl = static_call(kvm_x86_get_cpl)(vcpu);
        vcpu->run->xen.u.hcall.input = input;
        vcpu->run->xen.u.hcall.params[0] = params[0];
        vcpu->run->xen.u.hcall.params[1] = params[1];
        vcpu->run->xen.u.hcall.params[2] = params[2];
        vcpu->run->xen.u.hcall.params[3] = params[3];
        vcpu->run->xen.u.hcall.params[4] = params[4];
        vcpu->run->xen.u.hcall.params[5] = params[5];
        vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu);
        vcpu->arch.complete_userspace_io =
                kvm_xen_hypercall_complete_userspace;

        return 0;
}

static inline int max_evtchn_port(struct kvm *kvm)
{
        if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
                return EVTCHN_2L_NR_CHANNELS;
        else
                return COMPAT_EVTCHN_2L_NR_CHANNELS;
}

/*
 * The return value from this function is propagated to the kvm_set_irq()
 * API, so it returns:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 *
 * It is also called directly from kvm_arch_set_irq_inatomic(), where the
 * only check on its return value is a comparison with -EWOULDBLOCK.
 */
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{
        struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
        struct kvm_vcpu *vcpu;
        unsigned long *pending_bits, *mask_bits;
        unsigned long flags;
        int port_word_bit;
        bool kick_vcpu = false;
        int vcpu_idx, idx, rc;

        vcpu_idx = READ_ONCE(xe->vcpu_idx);
        if (vcpu_idx >= 0)
                vcpu = kvm_get_vcpu(kvm, vcpu_idx);
        else {
                vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);
                if (!vcpu)
                        return -EINVAL;
                WRITE_ONCE(xe->vcpu_idx, kvm_vcpu_get_idx(vcpu));
        }

        if (!vcpu->arch.xen.vcpu_info_cache.active)
                return -EINVAL;

        if (xe->port >= max_evtchn_port(kvm))
                return -EINVAL;

        rc = -EWOULDBLOCK;

        idx = srcu_read_lock(&kvm->srcu);

        read_lock_irqsave(&gpc->lock, flags);
        if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
                goto out_rcu;

        if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
                struct shared_info *shinfo = gpc->khva;
                pending_bits = (unsigned long *)&shinfo->evtchn_pending;
                mask_bits = (unsigned long *)&shinfo->evtchn_mask;
                port_word_bit = xe->port / 64;
        } else {
                struct compat_shared_info *shinfo = gpc->khva;
                pending_bits = (unsigned long *)&shinfo->evtchn_pending;
                mask_bits = (unsigned long *)&shinfo->evtchn_mask;
                port_word_bit = xe->port / 32;
        }

        /*
         * If this port wasn't already set, and if it isn't masked, then
         * we try to set the corresponding bit in the in-kernel shadow of
         * evtchn_pending_sel for the target vCPU. And if *that* wasn't
         * already set, then we kick the vCPU in question to write to the
         * *real* evtchn_pending_sel in its own guest vcpu_info struct.
         */
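        /*
         * For reference, the two-level hierarchy touched below
         * (descriptive only):
         *
         *	shared_info->evtchn_pending[]	per-port pending bit
         *	shared_info->evtchn_mask[]	per-port mask bit
         *	vcpu_info->evtchn_pending_sel	one bit per pending word
         *	vcpu_info->evtchn_upcall_pending  "deliver the upcall"
         */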
        if (test_and_set_bit(xe->port, pending_bits)) {
                rc = 0; /* It was already raised */
        } else if (test_bit(xe->port, mask_bits)) {
                rc = -ENOTCONN; /* Masked */
        } else {
                rc = 1; /* Delivered to the bitmap in shared_info. */
                /* Now switch to the vCPU's vcpu_info to set the index and pending_sel */
                read_unlock_irqrestore(&gpc->lock, flags);
                gpc = &vcpu->arch.xen.vcpu_info_cache;

                read_lock_irqsave(&gpc->lock, flags);
                if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, sizeof(struct vcpu_info))) {
                        /*
                         * Could not access the vcpu_info. Set the bit in-kernel
                         * and prod the vCPU to deliver it for itself.
                         */
                        if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
                                kick_vcpu = true;
                        goto out_rcu;
                }

                if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
                        struct vcpu_info *vcpu_info = gpc->khva;
                        if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) {
                                WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
                                kick_vcpu = true;
                        }
                } else {
                        struct compat_vcpu_info *vcpu_info = gpc->khva;
                        if (!test_and_set_bit(port_word_bit,
                                              (unsigned long *)&vcpu_info->evtchn_pending_sel)) {
                                WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
                                kick_vcpu = true;
                        }
                }
        }

 out_rcu:
        read_unlock_irqrestore(&gpc->lock, flags);
        srcu_read_unlock(&kvm->srcu, idx);

        if (kick_vcpu) {
                kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
                kvm_vcpu_kick(vcpu);
        }

        return rc;
}

static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{
        bool mm_borrowed = false;
        int rc;

        rc = kvm_xen_set_evtchn_fast(xe, kvm);
        if (rc != -EWOULDBLOCK)
                return rc;

        if (current->mm != kvm->mm) {
                /*
                 * If not on a thread which already belongs to this KVM,
                 * we'd better be in the irqfd workqueue.
                 */
                if (WARN_ON_ONCE(current->mm))
                        return -EINVAL;

                kthread_use_mm(kvm->mm);
                mm_borrowed = true;
        }

        /*
         * For the irqfd workqueue, using the main kvm->lock mutex is
         * fine since this function is invoked from kvm_set_irq() with
         * no other lock held, no srcu. If, in future, it is called
         * directly from a vCPU thread (e.g. on hypercall for an IPI)
         * then it may need to switch to using a leaf-node mutex for
         * serializing the shared_info mapping.
         */
        mutex_lock(&kvm->lock);

        /*
         * It is theoretically possible for the page to be unmapped
         * and the MMU notifier to invalidate the shared_info before
         * we even get to use it. In that case, this looks like an
         * infinite loop. It was tempting to do it via the userspace
         * HVA instead... but that just *hides* the fact that it's
         * an infinite loop, because if a fault occurs and it waits
         * for the page to come back, it can *still* immediately
         * fault and have to wait again, repeatedly.
         *
         * Conversely, the page could also have been reinstated by
         * another thread before we even obtain the mutex above, so
         * check again *first* before remapping it.
         */
        do {
                struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
                int idx;

                rc = kvm_xen_set_evtchn_fast(xe, kvm);
                if (rc != -EWOULDBLOCK)
                        break;

                idx = srcu_read_lock(&kvm->srcu);
                rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE);
                srcu_read_unlock(&kvm->srcu, idx);
        } while (!rc);

        mutex_unlock(&kvm->lock);

        if (mm_borrowed)
                kthread_unuse_mm(kvm->mm);

        return rc;
}

/* This is the version called from kvm_set_irq() as the .set function */
static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
                         int irq_source_id, int level, bool line_status)
{
        if (!level)
                return -EINVAL;

        return kvm_xen_set_evtchn(&e->xen_evtchn, kvm);
}

/*
 * Set up an event channel interrupt from the KVM IRQ routing table.
 * Used for e.g. PIRQ from passed through physical devices.
 */
int kvm_xen_setup_evtchn(struct kvm *kvm,
                         struct kvm_kernel_irq_routing_entry *e,
                         const struct kvm_irq_routing_entry *ue)
{
        struct kvm_vcpu *vcpu;

        if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm))
                return -EINVAL;

        /* We only support 2 level event channels for now */
        if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
                return -EINVAL;

        /*
         * Xen gives us interesting mappings from vCPU index to APIC ID,
         * which means kvm_get_vcpu_by_id() has to iterate over all vCPUs
         * to find it. Do that once at setup time, instead of every time.
         * But beware that on live update / live migration, the routing
         * table might be reinstated before the vCPU threads have finished
         * recreating their vCPUs.
         */
        vcpu = kvm_get_vcpu_by_id(kvm, ue->u.xen_evtchn.vcpu);
        if (vcpu)
                e->xen_evtchn.vcpu_idx = kvm_vcpu_get_idx(vcpu);
        else
                e->xen_evtchn.vcpu_idx = -1;

        e->xen_evtchn.port = ue->u.xen_evtchn.port;
        e->xen_evtchn.vcpu_id = ue->u.xen_evtchn.vcpu;
        e->xen_evtchn.priority = ue->u.xen_evtchn.priority;
        e->set = evtchn_set_fn;

        return 0;
}
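
/*
 * Userspace sketch (an assumption, not part of this file): such a
 * routing entry is installed with KVM_SET_GSI_ROUTING, e.g.
 *
 *	struct kvm_irq_routing_entry e = {
 *		.gsi = gsi,
 *		.type = KVM_IRQ_ROUTING_XEN_EVTCHN,
 *		.u.xen_evtchn = {
 *			.port = port,
 *			.vcpu = vcpu_id,
 *			.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
 *		},
 *	};
 *
 * after which kvm_set_irq() on that GSI ends up in evtchn_set_fn().
 */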

/*
 * Explicit event sending from userspace with KVM_XEN_HVM_EVTCHN_SEND ioctl.
 */
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *uxe)
{
        struct kvm_xen_evtchn e;
        int ret;

        if (!uxe->port || uxe->port >= max_evtchn_port(kvm))
                return -EINVAL;

        /* We only support 2 level event channels for now */
        if (uxe->priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
                return -EINVAL;

        e.port = uxe->port;
        e.vcpu_id = uxe->vcpu;
        e.vcpu_idx = -1;
        e.priority = uxe->priority;

        ret = kvm_xen_set_evtchn(&e, kvm);

        /*
         * None of that 'return 1 if it actually got delivered' nonsense.
         * We don't care if it was masked (-ENOTCONN) either.
         */
        if (ret > 0 || ret == -ENOTCONN)
                ret = 0;

        return ret;
}
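
/*
 * Userspace sketch (an assumption, not part of this file): injecting
 * an event directly, without any routing table entry:
 *
 *	struct kvm_irq_routing_xen_evtchn uxe = {
 *		.port = port,
 *		.vcpu = vcpu_id,
 *		.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
 *	};
 *	ioctl(vm_fd, KVM_XEN_HVM_EVTCHN_SEND, &uxe);
 */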

/*
 * Support for *outbound* event channel events via the EVTCHNOP_send hypercall.
 */
struct evtchnfd {
        u32 send_port;
        u32 type;
        union {
                struct kvm_xen_evtchn port;
                struct {
                        u32 port; /* zero */
                        struct eventfd_ctx *ctx;
                } eventfd;
        } deliver;
};
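
/*
 * Note: deliver.port.port == 0 (the 'zero' marked above) is what
 * distinguishes eventfd delivery from local-port delivery everywhere
 * this union is consumed.
 */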

/*
 * Update target vCPU or priority for a registered sending channel.
 */
static int kvm_xen_eventfd_update(struct kvm *kvm,
                                  struct kvm_xen_hvm_attr *data)
{
        u32 port = data->u.evtchn.send_port;
        struct evtchnfd *evtchnfd;

        if (!port || port >= max_evtchn_port(kvm))
                return -EINVAL;

        mutex_lock(&kvm->lock);
        evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);
        mutex_unlock(&kvm->lock);

        if (!evtchnfd)
                return -ENOENT;

        /* For an UPDATE, nothing may change except the priority/vcpu */
        if (evtchnfd->type != data->u.evtchn.type)
                return -EINVAL;

        /*
         * Port cannot change, and if it's zero that was an eventfd
         * which can't be changed either.
         */
        if (!evtchnfd->deliver.port.port ||
            evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port)
                return -EINVAL;

        /* We only support 2 level event channels for now */
        if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
                return -EINVAL;

        mutex_lock(&kvm->lock);
        evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
        if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) {
                evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
                evtchnfd->deliver.port.vcpu_idx = -1;
        }
        mutex_unlock(&kvm->lock);
        return 0;
}

/*
 * Configure the target (eventfd or local port delivery) for sending on
 * a given event channel.
 */
static int kvm_xen_eventfd_assign(struct kvm *kvm,
                                  struct kvm_xen_hvm_attr *data)
{
        u32 port = data->u.evtchn.send_port;
        struct eventfd_ctx *eventfd = NULL;
        struct evtchnfd *evtchnfd = NULL;
        int ret = -EINVAL;

        if (!port || port >= max_evtchn_port(kvm))
                return -EINVAL;

        evtchnfd = kzalloc(sizeof(struct evtchnfd), GFP_KERNEL);
        if (!evtchnfd)
                return -ENOMEM;

        switch (data->u.evtchn.type) {
        case EVTCHNSTAT_ipi:
                /* IPI must map back to the same port# */
                if (data->u.evtchn.deliver.port.port != data->u.evtchn.send_port)
                        goto out; /* -EINVAL */
                break;

        case EVTCHNSTAT_interdomain:
                if (data->u.evtchn.deliver.port.port) {
                        if (data->u.evtchn.deliver.port.port >= max_evtchn_port(kvm))
                                goto out; /* -EINVAL */
                } else {
                        eventfd = eventfd_ctx_fdget(data->u.evtchn.deliver.eventfd.fd);
                        if (IS_ERR(eventfd)) {
                                ret = PTR_ERR(eventfd);
                                goto out;
                        }
                }
                break;

        case EVTCHNSTAT_virq:
        case EVTCHNSTAT_closed:
        case EVTCHNSTAT_unbound:
        case EVTCHNSTAT_pirq:
        default: /* Unknown event channel type */
                goto out; /* -EINVAL */
        }

        evtchnfd->send_port = data->u.evtchn.send_port;
        evtchnfd->type = data->u.evtchn.type;
        if (eventfd) {
                evtchnfd->deliver.eventfd.ctx = eventfd;
        } else {
                /* We only support 2 level event channels for now */
                if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
                        goto out; /* -EINVAL */

                evtchnfd->deliver.port.port = data->u.evtchn.deliver.port.port;
                evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
                evtchnfd->deliver.port.vcpu_idx = -1;
                evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
        }

        mutex_lock(&kvm->lock);
        ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1,
                        GFP_KERNEL);
        mutex_unlock(&kvm->lock);
        if (ret >= 0)
                return 0;

        if (ret == -ENOSPC)
                ret = -EEXIST;
out:
        if (eventfd)
                eventfd_ctx_put(eventfd);
        kfree(evtchnfd);
        return ret;
}

static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
{
        struct evtchnfd *evtchnfd;

        mutex_lock(&kvm->lock);
        evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port);
        mutex_unlock(&kvm->lock);

        if (!evtchnfd)
                return -ENOENT;

        if (kvm)
                synchronize_srcu(&kvm->srcu);
        if (!evtchnfd->deliver.port.port)
                eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
        kfree(evtchnfd);
        return 0;
}

static int kvm_xen_eventfd_reset(struct kvm *kvm)
{
        struct evtchnfd *evtchnfd;
        int i;

        mutex_lock(&kvm->lock);
        idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
                idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
                synchronize_srcu(&kvm->srcu);
                if (!evtchnfd->deliver.port.port)
                        eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
                kfree(evtchnfd);
        }
        mutex_unlock(&kvm->lock);

        return 0;
}

static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
        u32 port = data->u.evtchn.send_port;

        if (data->u.evtchn.flags == KVM_XEN_EVTCHN_RESET)
                return kvm_xen_eventfd_reset(kvm);

        if (!port || port >= max_evtchn_port(kvm))
                return -EINVAL;

        if (data->u.evtchn.flags == KVM_XEN_EVTCHN_DEASSIGN)
                return kvm_xen_eventfd_deassign(kvm, port);
        if (data->u.evtchn.flags == KVM_XEN_EVTCHN_UPDATE)
                return kvm_xen_eventfd_update(kvm, data);
        if (data->u.evtchn.flags)
                return -EINVAL;

        return kvm_xen_eventfd_assign(kvm, data);
}
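
/*
 * Userspace sketch (an assumption, not part of this file): registering
 * an outbound IPI port that loops back to the same local port, so that
 * an EVTCHNOP_send from the guest is delivered in-kernel:
 *
 *	struct kvm_xen_hvm_attr ha = {
 *		.type = KVM_XEN_ATTR_TYPE_EVTCHN,
 *		.u.evtchn = {
 *			.send_port = port,
 *			.type = EVTCHNSTAT_ipi,
 *			.deliver.port = {
 *				.port = port,
 *				.vcpu = vcpu_id,
 *				.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
 *			},
 *		},
 *	};
 *	ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &ha);
 */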

static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
{
        struct evtchnfd *evtchnfd;
        struct evtchn_send send;
        gpa_t gpa;
        int idx;

        idx = srcu_read_lock(&vcpu->kvm->srcu);
        gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &send, sizeof(send))) {
                *r = -EFAULT;
                return true;
        }

        /* The evtchn_ports idr is protected by vcpu->kvm->srcu */
        evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port);
        if (!evtchnfd)
                return false;

        if (evtchnfd->deliver.port.port) {
                int ret = kvm_xen_set_evtchn(&evtchnfd->deliver.port, vcpu->kvm);
                if (ret < 0 && ret != -ENOTCONN)
                        return false;
        } else {
                eventfd_signal(evtchnfd->deliver.eventfd.ctx, 1);
        }

        *r = 0;
        return true;
}

void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
        kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
                                     &vcpu->arch.xen.runstate_cache);
        kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
                                     &vcpu->arch.xen.vcpu_info_cache);
        kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
                                     &vcpu->arch.xen.vcpu_time_info_cache);
}

void kvm_xen_init_vm(struct kvm *kvm)
{
        idr_init(&kvm->arch.xen.evtchn_ports);
}

void kvm_xen_destroy_vm(struct kvm *kvm)
{
        struct evtchnfd *evtchnfd;
        int i;

        kvm_gfn_to_pfn_cache_destroy(kvm, &kvm->arch.xen.shinfo_cache);

        idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
                if (!evtchnfd->deliver.port.port)
                        eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
                kfree(evtchnfd);
        }
        idr_destroy(&kvm->arch.xen.evtchn_ports);

        if (kvm->arch.xen_hvm_config.msr)
                static_branch_slow_dec_deferred(&kvm_xen_enabled);
}