// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <[email protected]>
 *          Christian Ehrhardt <[email protected]>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <linux/of.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>
#include <asm/setup.h>

#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
        return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
        return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return 1;
}
/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled (the WARN_ON below fires otherwise); this function hard-disables
 * them and leaves them disabled when returning 1, or re-enables them when
 * returning to the host.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        int r;

        WARN_ON(irqs_disabled());
        hard_irq_disable();

        while (true) {
                if (need_resched()) {
                        local_irq_enable();
                        cond_resched();
                        hard_irq_disable();
                        continue;
                }

                if (signal_pending(current)) {
                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
                        vcpu->run->exit_reason = KVM_EXIT_INTR;
                        r = -EINTR;
                        break;
                }

                vcpu->mode = IN_GUEST_MODE;

                /*
                 * Reading vcpu->requests must happen after setting vcpu->mode,
                 * so we don't miss a request because the requester sees
                 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
                 * before next entering the guest (and thus doesn't IPI).
                 * This also orders the write to mode from any reads
                 * to the page tables done while the VCPU is running.
                 * Please see the comment in kvm_flush_remote_tlbs.
                 */
                smp_mb();
                if (kvm_request_pending(vcpu)) {
                        /* Make sure we process requests while preemptible */
                        local_irq_enable();
                        trace_kvm_check_requests(vcpu);
                        r = kvmppc_core_check_requests(vcpu);
                        hard_irq_disable();
                        if (r > 0)
                                continue;
                        break;
                }

                if (kvmppc_core_prepare_to_enter(vcpu)) {
                        /*
                         * Interrupts got enabled in between, so we are
                         * back at square one.
                         */
                        continue;
                }

                guest_enter_irqoff();
                return 1;
        }

        /* return to host */
        local_irq_enable();
        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
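
/*
 * Editor's illustrative sketch (not part of the original file): roughly
 * how a subarch run loop is expected to use kvmppc_prepare_to_enter().
 * my_subarch_run() and my_enter_guest() are hypothetical stand-ins for
 * the Book3S/BookE implementations.
 */
#if 0
static int my_subarch_run(struct kvm_vcpu *vcpu)
{
        int r;

        do {
                /* Call with interrupts enabled ... */
                r = kvmppc_prepare_to_enter(vcpu);
                if (r <= 0)
                        break;  /* back to the host; interrupts are on again */

                /* ... and enter the guest with them hard-disabled. */
                r = my_enter_guest(vcpu);  /* hypothetical; re-enables IRQs on exit */
        } while (r > 0);

        return r;
}
#endif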

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
        int i;

        shared->sprg0 = swab64(shared->sprg0);
        shared->sprg1 = swab64(shared->sprg1);
        shared->sprg2 = swab64(shared->sprg2);
        shared->sprg3 = swab64(shared->sprg3);
        shared->srr0 = swab64(shared->srr0);
        shared->srr1 = swab64(shared->srr1);
        shared->dar = swab64(shared->dar);
        shared->msr = swab64(shared->msr);
        shared->dsisr = swab32(shared->dsisr);
        shared->int_pending = swab32(shared->int_pending);
        for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
                shared->sr[i] = swab32(shared->sr[i]);
}
#endif

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
        int nr = kvmppc_get_gpr(vcpu, 11);
        int r;
        unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
        unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
        unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
        unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
        unsigned long r2 = 0;

        if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
                /* 32 bit mode */
                param1 &= 0xffffffff;
                param2 &= 0xffffffff;
                param3 &= 0xffffffff;
                param4 &= 0xffffffff;
        }

        switch (nr) {
        case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
        {
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
                /* Book3S can be little endian, so determine that here */
                int shared_big_endian = true;
                if (vcpu->arch.intr_msr & MSR_LE)
                        shared_big_endian = false;
                if (shared_big_endian != vcpu->arch.shared_big_endian)
                        kvmppc_swab_shared(vcpu);
                vcpu->arch.shared_big_endian = shared_big_endian;
#endif

                if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
                        /*
                         * Older versions of the Linux magic page code had
                         * a bug where they would map their trampoline code
                         * NX. If that's the case, remove !PR NX capability.
                         */
                        vcpu->arch.disable_kernel_nx = true;
                        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
                }

                vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
                vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
                /*
                 * Make sure our 4k magic page is in the same window of a 64k
                 * page within the guest and within the host's page.
                 */
                if ((vcpu->arch.magic_page_pa & 0xf000) !=
                    ((ulong)vcpu->arch.shared & 0xf000)) {
                        void *old_shared = vcpu->arch.shared;
                        ulong shared = (ulong)vcpu->arch.shared;
                        void *new_shared;

                        shared &= PAGE_MASK;
                        shared |= vcpu->arch.magic_page_pa & 0xf000;
                        new_shared = (void *)shared;
                        memcpy(new_shared, old_shared, 0x1000);
                        vcpu->arch.shared = new_shared;
                }
#endif

                r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

                r = EV_SUCCESS;
                break;
        }
        case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
                r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
                r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

                /* Second return value is in r4 */
                break;
        case EV_HCALL_TOKEN(EV_IDLE):
                r = EV_SUCCESS;
                kvm_vcpu_halt(vcpu);
                break;
        default:
                r = EV_UNIMPLEMENTED;
                break;
        }

        kvmppc_set_gpr(vcpu, 4, r2);

        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
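
/*
 * Editor's illustrative sketch (guest side, not part of the original
 * file): the calling convention that kvmppc_kvm_pv() services. A Linux
 * guest issues these through epapr_hypercall(), which puts the token in
 * r11 and in[0..7] in r3..r10; the status comes back in r3 and the
 * second return value (r2 above) in r4, i.e. out[0].
 * guest_map_magic_page() is a hypothetical wrapper.
 */
#if 0
static unsigned long guest_map_magic_page(unsigned long pa, unsigned long ea)
{
        unsigned long in[8] = { pa, ea };
        unsigned long out[8] = { 0 };

        if (epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE)))
                return 0;
        return out[0];          /* feature bitmap returned in r4 */
}
#endif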

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
        int r = false;

        /* We have to know what CPU to virtualize */
        if (!vcpu->arch.pvr)
                goto out;

        /* PAPR only works with book3s_64 */
        if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
                goto out;

        /* HV KVM can only do PAPR mode for now */
        if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
                goto out;

#ifdef CONFIG_KVM_BOOKE_HV
        if (!cpu_has_feature(CPU_FTR_EMB_HV))
                goto out;
#endif

        r = true;

out:
        vcpu->arch.sane = r;
        return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_loadstore(vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_AGAIN:
                r = RESUME_GUEST;
                break;
        case EMULATE_DO_MMIO:
                vcpu->run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
        {
                ppc_inst_t last_inst;

                kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
                kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n",
                                      ppc_inst_val(last_inst));

                /*
                 * Injecting a Data Storage here is a bit more
                 * accurate since the instruction that caused the
                 * access could still be a valid one.
                 */
                if (!IS_ENABLED(CONFIG_BOOKE)) {
                        ulong dsisr = DSISR_BADACCESS;

                        if (vcpu->mmio_is_write)
                                dsisr |= DSISR_ISSTORE;

                        kvmppc_core_queue_data_storage(vcpu,
                                        kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
                                        vcpu->arch.vaddr_accessed, dsisr);
                } else {
                        /*
                         * BookE does not send a SIGBUS on a bad
                         * fault, so use a Program interrupt instead
                         * to avoid a fault loop.
                         */
                        kvmppc_core_queue_program(vcpu, 0);
                }

                r = RESUME_GUEST;
                break;
        }
        default:
                WARN_ON(1);
                r = RESUME_GUEST;
        }

        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
              bool data)
{
        ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
        struct kvmppc_pte pte;
        int r = -EINVAL;

        vcpu->stat.st++;

        if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
                r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
                                                            size);

        if ((!r) || (r == -EAGAIN))
                return r;

        r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
                         XLATE_WRITE, &pte);
        if (r < 0)
                return r;

        *eaddr = pte.raddr;

        if (!pte.may_write)
                return -EPERM;

        /* Magic page override */
        if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
            ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
            !(kvmppc_get_msr(vcpu) & MSR_PR)) {
                void *magic = vcpu->arch.shared;
                magic += pte.eaddr & 0xfff;
                memcpy(magic, ptr, size);
                return EMULATE_DONE;
        }

        if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
                return EMULATE_DO_MMIO;

        return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
              bool data)
{
        ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
        struct kvmppc_pte pte;
        int rc = -EINVAL;

        vcpu->stat.ld++;

        if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
                rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
                                                              size);

        if ((!rc) || (rc == -EAGAIN))
                return rc;

        rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
                          XLATE_READ, &pte);
        if (rc)
                return rc;

        *eaddr = pte.raddr;

        if (!pte.may_read)
                return -EPERM;

        if (!data && !pte.may_execute)
                return -ENOEXEC;

        /* Magic page override */
        if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
            ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
            !(kvmppc_get_msr(vcpu) & MSR_PR)) {
                void *magic = vcpu->arch.shared;
                magic += pte.eaddr & 0xfff;
                memcpy(ptr, magic, size);
                return EMULATE_DONE;
        }

        kvm_vcpu_srcu_read_lock(vcpu);
        rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
        kvm_vcpu_srcu_read_unlock(vcpu);
        if (rc)
                return EMULATE_DO_MMIO;

        return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);
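
/*
 * Editor's illustrative sketch (not part of the original file): how an
 * emulation path might use kvmppc_ld() to read guest memory through an
 * effective address. my_read_guest_u32() is a hypothetical helper.
 */
#if 0
static int my_read_guest_u32(struct kvm_vcpu *vcpu, ulong eaddr, u32 *val)
{
        /* eaddr is translated in place; on success it holds the real address */
        int r = kvmppc_ld(vcpu, &eaddr, sizeof(*val), val, true);

        if (r == EMULATE_DONE)
                return 0;       /* fetched from RAM or the magic page */
        if (r == EMULATE_DO_MMIO)
                return -EAGAIN; /* needs MMIO emulation instead */
        return r;               /* -EPERM, -ENOEXEC or a translation error */
}
#endif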

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        struct kvmppc_ops *kvm_ops = NULL;
        int r;

        /*
         * If we have both HV and PR enabled, default is HV.
         */
        if (type == 0) {
                if (kvmppc_hv_ops)
                        kvm_ops = kvmppc_hv_ops;
                else
                        kvm_ops = kvmppc_pr_ops;
                if (!kvm_ops)
                        goto err_out;
        } else if (type == KVM_VM_PPC_HV) {
                if (!kvmppc_hv_ops)
                        goto err_out;
                kvm_ops = kvmppc_hv_ops;
        } else if (type == KVM_VM_PPC_PR) {
                if (!kvmppc_pr_ops)
                        goto err_out;
                kvm_ops = kvmppc_pr_ops;
        } else
                goto err_out;

        if (!try_module_get(kvm_ops->owner))
                return -ENOENT;

        kvm->arch.kvm_ops = kvm_ops;
        r = kvmppc_core_init_vm(kvm);
        if (r)
                module_put(kvm_ops->owner);
        return r;
err_out:
        return -EINVAL;
}
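
/*
 * Editor's illustrative sketch (userspace, not part of the original
 * file): the 'type' argument above comes straight from KVM_CREATE_VM.
 * Error handling is omitted.
 */
#if 0
        int kvm_fd = open("/dev/kvm", O_RDWR);

        /* 0 selects the default flavour: HV if available, otherwise PR */
        int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

        /* or request a flavour explicitly */
        int pr_vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_PPC_PR);
#endif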

void kvm_arch_destroy_vm(struct kvm *kvm)
{
#ifdef CONFIG_KVM_XICS
        /*
         * We call kick_all_cpus_sync() to ensure that all
         * CPUs have executed any pending IPIs before we
         * continue and free VCPUs structures below.
         */
        if (is_kvmppc_hv_enabled(kvm))
                kick_all_cpus_sync();
#endif

        kvm_destroy_vcpus(kvm);

        mutex_lock(&kvm->lock);

        kvmppc_core_destroy_vm(kvm);

        mutex_unlock(&kvm->lock);

        /* drop the module reference */
        module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;
        /* Assume we're using HV mode when the HV module is loaded */
        int hv_enabled = kvmppc_hv_ops ? 1 : 0;

        if (kvm) {
                /*
                 * Hooray - we know which VM type we're running on. Depend on
                 * that rather than the guess above.
                 */
                hv_enabled = is_kvmppc_hv_enabled(kvm);
        }

        switch (ext) {
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_SREGS:
        case KVM_CAP_PPC_BOOKE_WATCHDOG:
        case KVM_CAP_PPC_EPR:
#else
        case KVM_CAP_PPC_SEGSTATE:
        case KVM_CAP_PPC_HIOR:
        case KVM_CAP_PPC_PAPR:
#endif
        case KVM_CAP_PPC_UNSET_IRQ:
        case KVM_CAP_PPC_IRQ_LEVEL:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_IMMEDIATE_EXIT:
        case KVM_CAP_SET_GUEST_DEBUG:
                r = 1;
                break;
        case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
        case KVM_CAP_PPC_PAIRED_SINGLES:
        case KVM_CAP_PPC_OSI:
        case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB:
#endif
                /* We support this only for PR */
                r = !hv_enabled;
                break;
#ifdef CONFIG_KVM_MPIC
        case KVM_CAP_IRQ_MPIC:
                r = 1;
                break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_SPAPR_TCE:
        case KVM_CAP_SPAPR_TCE_64:
                r = 1;
                break;
        case KVM_CAP_SPAPR_TCE_VFIO:
                r = !!cpu_has_feature(CPU_FTR_HVMODE);
                break;
        case KVM_CAP_PPC_RTAS:
        case KVM_CAP_PPC_FIXUP_HCALL:
        case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
        case KVM_CAP_IRQ_XICS:
#endif
        case KVM_CAP_PPC_GET_CPU_CHAR:
                r = 1;
                break;
#ifdef CONFIG_KVM_XIVE
        case KVM_CAP_PPC_IRQ_XIVE:
                /*
                 * We need XIVE to be enabled on the platform (which
                 * implies a POWER9 processor) and to be running on
                 * PowerNV, as nested virtualization is not yet
                 * supported.
                 */
                r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
                        kvmppc_xive_native_supported();
                break;
#endif

#ifdef CONFIG_HAVE_KVM_IRQCHIP
        case KVM_CAP_IRQFD_RESAMPLE:
                r = !xive_enabled();
                break;
#endif

        case KVM_CAP_PPC_ALLOC_HTAB:
                r = hv_enabled;
                break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_CAP_PPC_SMT:
                r = 0;
                if (kvm) {
                        if (kvm->arch.emul_smt_mode > 1)
                                r = kvm->arch.emul_smt_mode;
                        else
                                r = kvm->arch.smt_mode;
                } else if (hv_enabled) {
                        if (cpu_has_feature(CPU_FTR_ARCH_300))
                                r = 1;
                        else
                                r = threads_per_subcore;
                }
                break;
        case KVM_CAP_PPC_SMT_POSSIBLE:
                r = 1;
                if (hv_enabled) {
                        if (!cpu_has_feature(CPU_FTR_ARCH_300))
                                r = ((threads_per_subcore << 1) - 1);
                        else
                                /* P9 can emulate dbells, so allow any mode */
                                r = 8 | 4 | 2 | 1;
                }
                break;
        case KVM_CAP_PPC_HWRNG:
                r = kvmppc_hwrng_present();
                break;
        case KVM_CAP_PPC_MMU_RADIX:
                r = !!(hv_enabled && radix_enabled());
                break;
        case KVM_CAP_PPC_MMU_HASH_V3:
                r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
                       kvmppc_hv_ops->hash_v3_possible());
                break;
        case KVM_CAP_PPC_NESTED_HV:
                r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
                       !kvmppc_hv_ops->enable_nested(NULL));
                break;
#endif
        case KVM_CAP_SYNC_MMU:
                BUILD_BUG_ON(!IS_ENABLED(CONFIG_KVM_GENERIC_MMU_NOTIFIER));
                r = 1;
                break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_CAP_PPC_HTAB_FD:
                r = hv_enabled;
                break;
#endif
        case KVM_CAP_NR_VCPUS:
                /*
                 * Recommending a number of CPUs is somewhat arbitrary; we
                 * return the number of present CPUs for -HV (since a host
                 * will have secondary threads "offline"), and for other KVM
                 * implementations just count online CPUs.
                 */
                if (hv_enabled)
                        r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
                else
                        r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_MAX_VCPU_ID:
                r = KVM_MAX_VCPU_IDS;
                break;
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_PPC_GET_SMMU_INFO:
                r = 1;
                break;
        case KVM_CAP_SPAPR_MULTITCE:
                r = 1;
                break;
        case KVM_CAP_SPAPR_RESIZE_HPT:
                r = !!hv_enabled;
                break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_CAP_PPC_FWNMI:
                r = hv_enabled;
                break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        case KVM_CAP_PPC_HTM:
                r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
                     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
                break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
        case KVM_CAP_PPC_SECURE_GUEST:
                r = hv_enabled && kvmppc_hv_ops->enable_svm &&
                        !kvmppc_hv_ops->enable_svm(NULL);
                break;
        case KVM_CAP_PPC_DAWR1:
                r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
                       !kvmppc_hv_ops->enable_dawr1(NULL));
                break;
        case KVM_CAP_PPC_RPT_INVALIDATE:
                r = 1;
                break;
#endif
        case KVM_CAP_PPC_AIL_MODE_3:
                r = 0;
                /*
                 * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode.
                 * The POWER9s can support it if the guest runs in hash mode,
                 * but QEMU doesn't necessarily query the capability in time.
                 */
                if (hv_enabled) {
                        if (kvmhv_on_pseries()) {
                                if (pseries_reloc_on_exception())
                                        r = 1;
                        } else if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
                                  !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
                                r = 1;
                        }
                }
                break;
        default:
                r = 0;
                break;
        }
        return r;
}
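
/*
 * Editor's illustrative sketch (userspace, not part of the original
 * file): capabilities are probed with KVM_CHECK_EXTENSION, either on
 * /dev/kvm (no VM yet, so the HV-module heuristic above applies) or on
 * a VM fd (exact answer for that VM's flavour).
 */
#if 0
        int has_htab = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_ALLOC_HTAB);
        int smt_modes = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT_POSSIBLE);
#endif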

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        kvmppc_core_free_memslot(kvm, slot);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   const struct kvm_memory_slot *old,
                                   struct kvm_memory_slot *new,
                                   enum kvm_mr_change change)
{
        return kvmppc_core_prepare_memory_region(kvm, old, new, change);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *old,
                                   const struct kvm_memory_slot *new,
                                   enum kvm_mr_change change)
{
        kvmppc_core_commit_memory_region(kvm, old, new, change);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
        kvmppc_core_flush_memslot(kvm, slot);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
        return 0;
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
        kvmppc_decrementer_func(vcpu);

        return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
        int err;

        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;

#ifdef CONFIG_KVM_EXIT_TIMING
        mutex_init(&vcpu->arch.exit_timing_lock);
#endif
        err = kvmppc_subarch_vcpu_init(vcpu);
        if (err)
                return err;

        err = kvmppc_core_vcpu_create(vcpu);
        if (err)
                goto out_vcpu_uninit;

        rcuwait_init(&vcpu->arch.wait);
        vcpu->arch.waitp = &vcpu->arch.wait;
        return 0;

out_vcpu_uninit:
        kvmppc_subarch_vcpu_uninit(vcpu);
        return err;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        /* Make sure we're not using the vcpu anymore */
        hrtimer_cancel(&vcpu->arch.dec_timer);

        switch (vcpu->arch.irq_type) {
        case KVMPPC_IRQ_MPIC:
                kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
                break;
        case KVMPPC_IRQ_XICS:
                if (xics_on_xive())
                        kvmppc_xive_cleanup_vcpu(vcpu);
                else
                        kvmppc_xics_free_icp(vcpu);
                break;
        case KVMPPC_IRQ_XIVE:
                kvmppc_xive_native_cleanup_vcpu(vcpu);
                break;
        }

        kvmppc_core_vcpu_free(vcpu);

        kvmppc_subarch_vcpu_uninit(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvmppc_core_pending_dec(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
        /*
         * vrsave (formerly usprg0) isn't used by Linux, but may
         * be used by the guest.
         *
         * On non-booke this is associated with Altivec and
         * is handled by code in book3s.c.
         */
        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
        kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
        return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
                (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
                                     struct irq_bypass_producer *prod)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(cons, struct kvm_kernel_irqfd, consumer);
        struct kvm *kvm = irqfd->kvm;

        if (kvm->arch.kvm_ops->irq_bypass_add_producer)
                return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

        return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
                                      struct irq_bypass_producer *prod)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(cons, struct kvm_kernel_irqfd, consumer);
        struct kvm *kvm = irqfd->kvm;

        if (kvm->arch.kvm_ops->irq_bypass_del_producer)
                kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
        int offset;

        if ((index != 0) && (index != 1))
                return -1;

#ifdef __BIG_ENDIAN
        offset = index;
#else
        offset = 1 - index;
#endif

        return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
        int offset;

        if ((index > 3) || (index < 0))
                return -1;

#ifdef __BIG_ENDIAN
        offset = index;
#else
        offset = 3 - index;
#endif
        return offset;
}
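
/*
 * Editor's note: a worked example of the mapping above. MMIO element
 * indices are in guest (big-endian) order, so on a little-endian host
 * they are mirrored within the VSX register:
 *
 *      dword index 0 -> offset 0 on a BE host, offset 1 on LE
 *      word  index 0 -> offset 0 on a BE host, offset 3 on LE
 *      word  index 3 -> offset 3 on a BE host, offset 0 on LE
 */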

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
        u64 gpr)
{
        union kvmppc_one_reg val;
        int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

        if (offset == -1)
                return;

        if (index >= 32) {
                kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
                val.vsxval[offset] = gpr;
                kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
        } else {
                kvmppc_set_vsx_fpr(vcpu, index, offset, gpr);
        }
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
        u64 gpr)
{
        union kvmppc_one_reg val;
        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

        if (index >= 32) {
                kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
                val.vsxval[0] = gpr;
                val.vsxval[1] = gpr;
                kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
        } else {
                kvmppc_set_vsx_fpr(vcpu, index, 0, gpr);
                kvmppc_set_vsx_fpr(vcpu, index, 1, gpr);
        }
}

static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
        u32 gpr)
{
        union kvmppc_one_reg val;
        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

        if (index >= 32) {
                val.vsx32val[0] = gpr;
                val.vsx32val[1] = gpr;
                val.vsx32val[2] = gpr;
                val.vsx32val[3] = gpr;
                kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
        } else {
                val.vsx32val[0] = gpr;
                val.vsx32val[1] = gpr;
                kvmppc_set_vsx_fpr(vcpu, index, 0, val.vsxval[0]);
                kvmppc_set_vsx_fpr(vcpu, index, 1, val.vsxval[0]);
        }
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
        u32 gpr32)
{
        union kvmppc_one_reg val;
        int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
        int dword_offset, word_offset;

        if (offset == -1)
                return;

        if (index >= 32) {
                kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
                val.vsx32val[offset] = gpr32;
                kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
        } else {
                dword_offset = offset / 2;
                word_offset = offset % 2;
                val.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, index, dword_offset);
                val.vsx32val[word_offset] = gpr32;
                kvmppc_set_vsx_fpr(vcpu, index, dword_offset, val.vsxval[0]);
        }
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
                int index, int element_size)
{
        int offset;
        int elts = sizeof(vector128)/element_size;

        if ((index < 0) || (index >= elts))
                return -1;

        if (kvmppc_need_byteswap(vcpu))
                offset = elts - index - 1;
        else
                offset = index;

        return offset;
}

static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
                int index)
{
        return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
}

static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
                int index)
{
        return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
}

static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
                int index)
{
        return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
}

static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
                int index)
{
        return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
}

static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
        u64 gpr)
{
        union kvmppc_one_reg val;
        int offset = kvmppc_get_vmx_dword_offset(vcpu,
                        vcpu->arch.mmio_vmx_offset);
        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

        if (offset == -1)
                return;

        kvmppc_get_vsx_vr(vcpu, index, &val.vval);
        val.vsxval[offset] = gpr;
        kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}

static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
        u32 gpr32)
{
        union kvmppc_one_reg val;
        int offset = kvmppc_get_vmx_word_offset(vcpu,
                        vcpu->arch.mmio_vmx_offset);
        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

        if (offset == -1)
                return;

        kvmppc_get_vsx_vr(vcpu, index, &val.vval);
        val.vsx32val[offset] = gpr32;
        kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}

static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
        u16 gpr16)
{
        union kvmppc_one_reg val;
        int offset = kvmppc_get_vmx_hword_offset(vcpu,
                        vcpu->arch.mmio_vmx_offset);
        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

        if (offset == -1)
                return;

        kvmppc_get_vsx_vr(vcpu, index, &val.vval);
        val.vsx16val[offset] = gpr16;
        kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}

static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
        u8 gpr8)
{
        union kvmppc_one_reg val;
        int offset = kvmppc_get_vmx_byte_offset(vcpu,
                        vcpu->arch.mmio_vmx_offset);
        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

        if (offset == -1)
                return;

        kvmppc_get_vsx_vr(vcpu, index, &val.vval);
        val.vsx8val[offset] = gpr8;
        kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC_FPU
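/*
 * Editor's note: the FPU itself performs the format conversion below.
 * lfs loads a single-precision value and expands it to double precision
 * in fr0, which stfd then stores out (dp_to_sp does the reverse with
 * lfd/stfs). fr0 is clobbered, hence the clobber list, and kernel FP use
 * is bracketed by preempt_disable()/enable_kernel_fp().
 */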
static inline u64 sp_to_dp(u32 fprs)
{
        u64 fprd;

        preempt_disable();
        enable_kernel_fp();
        asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs)
             : "fr0");
        preempt_enable();
        return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
        u32 fprs;

        preempt_disable();
        enable_kernel_fp();
        asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd)
             : "fr0");
        preempt_enable();
        return fprs;
}

#else
#define sp_to_dp(x)     (x)
#define dp_to_sp(x)     (x)
#endif /* CONFIG_PPC_FPU */

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u64 gpr;

        if (run->mmio.len > sizeof(gpr))
                return;

        if (!vcpu->arch.mmio_host_swabbed) {
                switch (run->mmio.len) {
                case 8: gpr = *(u64 *)run->mmio.data; break;
                case 4: gpr = *(u32 *)run->mmio.data; break;
                case 2: gpr = *(u16 *)run->mmio.data; break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                switch (run->mmio.len) {
                case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
                case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
                case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        }

        /* conversion between single and double precision */
        if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
                gpr = sp_to_dp(gpr);

        if (vcpu->arch.mmio_sign_extend) {
                switch (run->mmio.len) {
#ifdef CONFIG_PPC64
                case 4:
                        gpr = (s64)(s32)gpr;
                        break;
#endif
                case 2:
                        gpr = (s64)(s16)gpr;
                        break;
                case 1:
                        gpr = (s64)(s8)gpr;
                        break;
                }
        }

        switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
        case KVM_MMIO_REG_GPR:
                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                break;
        case KVM_MMIO_REG_FPR:
                if (vcpu->kvm->arch.kvm_ops->giveup_ext)
                        vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);

                kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
                break;
#ifdef CONFIG_PPC_BOOK3S
        case KVM_MMIO_REG_QPR:
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
        case KVM_MMIO_REG_FQPR:
                kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
#endif
#ifdef CONFIG_VSX
        case KVM_MMIO_REG_VSX:
                if (vcpu->kvm->arch.kvm_ops->giveup_ext)
                        vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);

                if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
                        kvmppc_set_vsr_dword(vcpu, gpr);
                else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
                        kvmppc_set_vsr_word(vcpu, gpr);
                else if (vcpu->arch.mmio_copy_type ==
                                KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
                        kvmppc_set_vsr_dword_dump(vcpu, gpr);
                else if (vcpu->arch.mmio_copy_type ==
                                KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
                        kvmppc_set_vsr_word_dump(vcpu, gpr);
                break;
#endif
#ifdef CONFIG_ALTIVEC
        case KVM_MMIO_REG_VMX:
                if (vcpu->kvm->arch.kvm_ops->giveup_ext)
                        vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);

                if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
                        kvmppc_set_vmx_dword(vcpu, gpr);
                else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
                        kvmppc_set_vmx_word(vcpu, gpr);
                else if (vcpu->arch.mmio_copy_type ==
                                KVMPPC_VMX_COPY_HWORD)
                        kvmppc_set_vmx_hword(vcpu, gpr);
                else if (vcpu->arch.mmio_copy_type ==
                                KVMPPC_VMX_COPY_BYTE)
                        kvmppc_set_vmx_byte(vcpu, gpr);
                break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_MMIO_REG_NESTED_GPR:
                if (kvmppc_need_byteswap(vcpu))
                        gpr = swab64(gpr);
                kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
                                     sizeof(gpr));
                break;
#endif
        default:
                BUG();
        }
}

static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
                                unsigned int rt, unsigned int bytes,
                                int is_default_endian, int sign_extend)
{
        struct kvm_run *run = vcpu->run;
        int idx, ret;
        bool host_swabbed;

        /* Pity C doesn't have a logical XOR operator */
        if (kvmppc_need_byteswap(vcpu)) {
                host_swabbed = is_default_endian;
        } else {
                host_swabbed = !is_default_endian;
        }

        if (bytes > sizeof(run->mmio.data))
                return EMULATE_FAIL;

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_host_swabbed = host_swabbed;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        vcpu->arch.mmio_sign_extend = sign_extend;

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
                              bytes, &run->mmio.data);

        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        if (!ret) {
                kvmppc_complete_mmio_load(vcpu);
                vcpu->mmio_needed = 0;
                return EMULATE_DONE;
        }

        return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes,
                       int is_default_endian)
{
        return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes,
                        int is_default_endian)
{
        return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
}

#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes,
                        int is_default_endian, int mmio_sign_extend)
{
        enum emulation_result emulated = EMULATE_DONE;

        /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
        if (vcpu->arch.mmio_vsx_copy_nums > 4)
                return EMULATE_FAIL;

        while (vcpu->arch.mmio_vsx_copy_nums) {
                emulated = __kvmppc_handle_load(vcpu, rt, bytes,
                        is_default_endian, mmio_sign_extend);

                if (emulated != EMULATE_DONE)
                        break;

                vcpu->arch.paddr_accessed += vcpu->run->mmio.len;

                vcpu->arch.mmio_vsx_copy_nums--;
                vcpu->arch.mmio_vsx_offset++;
        }
        return emulated;
}
#endif /* CONFIG_VSX */

int kvmppc_handle_store(struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_default_endian)
{
        struct kvm_run *run = vcpu->run;
        void *data = run->mmio.data;
        int idx, ret;
        bool host_swabbed;

        /* Pity C doesn't have a logical XOR operator */
        if (kvmppc_need_byteswap(vcpu)) {
                host_swabbed = is_default_endian;
        } else {
                host_swabbed = !is_default_endian;
        }

        if (bytes > sizeof(run->mmio.data))
                return EMULATE_FAIL;

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
                val = dp_to_sp(val);

        /* Store the value at the lowest bytes in 'data'. */
        if (!host_swabbed) {
                switch (bytes) {
                case 8: *(u64 *)data = val; break;
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8  *)data = val; break;
                }
        } else {
                switch (bytes) {
                case 8: *(u64 *)data = swab64(val); break;
                case 4: *(u32 *)data = swab32(val); break;
                case 2: *(u16 *)data = swab16(val); break;
                case 1: *(u8  *)data = val; break;
                }
        }

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
                               bytes, &run->mmio.data);

        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        if (!ret) {
                vcpu->mmio_needed = 0;
                return EMULATE_DONE;
        }

        return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
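
/*
 * Editor's illustrative sketch (userspace, not part of the original
 * file): when the handlers above return EMULATE_DO_MMIO, KVM_RUN exits
 * with KVM_EXIT_MMIO and userspace completes the access; on the next
 * KVM_RUN a pending load is finished via kvmppc_complete_mmio_load().
 * device_read()/device_write() stand in for a hypothetical device model.
 */
#if 0
        ioctl(vcpu_fd, KVM_RUN, 0);
        if (run->exit_reason == KVM_EXIT_MMIO) {
                if (run->mmio.is_write)
                        device_write(run->mmio.phys_addr, run->mmio.data,
                                     run->mmio.len);
                else
                        device_read(run->mmio.phys_addr, run->mmio.data,
                                    run->mmio.len);
                /* re-enter; the kernel consumes run->mmio.data for loads */
                ioctl(vcpu_fd, KVM_RUN, 0);
        }
#endif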

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
        u32 dword_offset, word_offset;
        union kvmppc_one_reg reg;
        int vsx_offset = 0;
        int copy_type = vcpu->arch.mmio_copy_type;
        int result = 0;

        switch (copy_type) {
        case KVMPPC_VSX_COPY_DWORD:
                vsx_offset =
                        kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

                if (vsx_offset == -1) {
                        result = -1;
                        break;
                }

                if (rs < 32) {
                        *val = kvmppc_get_vsx_fpr(vcpu, rs, vsx_offset);
                } else {
                        kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval);
                        *val = reg.vsxval[vsx_offset];
                }
                break;

        case KVMPPC_VSX_COPY_WORD:
                vsx_offset =
                        kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

                if (vsx_offset == -1) {
                        result = -1;
                        break;
                }

                if (rs < 32) {
                        dword_offset = vsx_offset / 2;
                        word_offset = vsx_offset % 2;
                        reg.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, rs, dword_offset);
                        *val = reg.vsx32val[word_offset];
                } else {
                        kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval);
                        *val = reg.vsx32val[vsx_offset];
                }
                break;

        default:
                result = -1;
                break;
        }

        return result;
}

int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
                        int rs, unsigned int bytes, int is_default_endian)
{
        u64 val;
        enum emulation_result emulated = EMULATE_DONE;

        vcpu->arch.io_gpr = rs;

        /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
        if (vcpu->arch.mmio_vsx_copy_nums > 4)
                return EMULATE_FAIL;

        while (vcpu->arch.mmio_vsx_copy_nums) {
                if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
                        return EMULATE_FAIL;

                emulated = kvmppc_handle_store(vcpu,
                         val, bytes, is_default_endian);

                if (emulated != EMULATE_DONE)
                        break;

                vcpu->arch.paddr_accessed += vcpu->run->mmio.len;

                vcpu->arch.mmio_vsx_copy_nums--;
                vcpu->arch.mmio_vsx_offset++;
        }

        return emulated;
}
1478
1479 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
1480 {
1481         struct kvm_run *run = vcpu->run;
1482         enum emulation_result emulated = EMULATE_FAIL;
1483         int r;
1484
1485         vcpu->arch.paddr_accessed += run->mmio.len;
1486
1487         if (!vcpu->mmio_is_write) {
1488                 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
1489                          run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
1490         } else {
1491                 emulated = kvmppc_handle_vsx_store(vcpu,
1492                          vcpu->arch.io_gpr, run->mmio.len, 1);
1493         }
1494
1495         switch (emulated) {
1496         case EMULATE_DO_MMIO:
1497                 run->exit_reason = KVM_EXIT_MMIO;
1498                 r = RESUME_HOST;
1499                 break;
1500         case EMULATE_FAIL:
1501                 pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
1502                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1503                 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1504                 r = RESUME_HOST;
1505                 break;
1506         default:
1507                 r = RESUME_GUEST;
1508                 break;
1509         }
1510         return r;
1511 }
1512 #endif /* CONFIG_VSX */
1513
1514 #ifdef CONFIG_ALTIVEC
1515 int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
1516                 unsigned int rt, unsigned int bytes, int is_default_endian)
1517 {
1518         enum emulation_result emulated = EMULATE_DONE;
1519
1520         if (vcpu->arch.mmio_vmx_copy_nums > 2)
1521                 return EMULATE_FAIL;
1522
1523         while (vcpu->arch.mmio_vmx_copy_nums) {
1524                 emulated = __kvmppc_handle_load(vcpu, rt, bytes,
1525                                 is_default_endian, 0);
1526
1527                 if (emulated != EMULATE_DONE)
1528                         break;
1529
1530                 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1531                 vcpu->arch.mmio_vmx_copy_nums--;
1532                 vcpu->arch.mmio_vmx_offset++;
1533         }
1534
1535         return emulated;
1536 }
1537
1538 static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
1539 {
1540         union kvmppc_one_reg reg;
1541         int vmx_offset = 0;
1542         int result = 0;
1543
1544         vmx_offset =
1545                 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1546
1547         if (vmx_offset == -1)
1548                 return -1;
1549
1550         kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
1551         *val = reg.vsxval[vmx_offset];
1552
1553         return result;
1554 }
1555
1556 static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
1557 {
1558         union kvmppc_one_reg reg;
1559         int vmx_offset = 0;
1560         int result = 0;
1561
1562         vmx_offset =
1563                 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1564
1565         if (vmx_offset == -1)
1566                 return -1;
1567
1568         kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
1569         *val = reg.vsx32val[vmx_offset];
1570
1571         return result;
1572 }
1573
1574 static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
1575 {
1576         union kvmppc_one_reg reg;
1577         int vmx_offset = 0;
1578         int result = 0;
1579
1580         vmx_offset =
1581                 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1582
1583         if (vmx_offset == -1)
1584                 return -1;
1585
1586         kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
1587         *val = reg.vsx16val[vmx_offset];
1588
1589         return result;
1590 }
1591
1592 static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
1593 {
1594         union kvmppc_one_reg reg;
1595         int vmx_offset = 0;
1596         int result = 0;
1597
1598         vmx_offset =
1599                 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1600
1601         if (vmx_offset == -1)
1602                 return -1;
1603
1604         kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
1605         *val = reg.vsx8val[vmx_offset];
1606
1607         return result;
1608 }
1609
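/*
 * Handle a VMX (AltiVec) store that has faulted out to MMIO.  The
 * element width is selected by mmio_copy_type; each pass extracts one
 * element from the source VR and emits it as an ordinary MMIO store.
 */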
1610 int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
1611                 unsigned int rs, unsigned int bytes, int is_default_endian)
1612 {
1613         u64 val = 0;
1614         unsigned int index = rs & KVM_MMIO_REG_MASK;
1615         enum emulation_result emulated = EMULATE_DONE;
1616
1617         if (vcpu->arch.mmio_vmx_copy_nums > 2)
1618                 return EMULATE_FAIL;
1619
1620         vcpu->arch.io_gpr = rs;
1621
1622         while (vcpu->arch.mmio_vmx_copy_nums) {
1623                 switch (vcpu->arch.mmio_copy_type) {
1624                 case KVMPPC_VMX_COPY_DWORD:
1625                         if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
1626                                 return EMULATE_FAIL;
1627
1628                         break;
1629                 case KVMPPC_VMX_COPY_WORD:
1630                         if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
1631                                 return EMULATE_FAIL;
1632                         break;
1633                 case KVMPPC_VMX_COPY_HWORD:
1634                         if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
1635                                 return EMULATE_FAIL;
1636                         break;
1637                 case KVMPPC_VMX_COPY_BYTE:
1638                         if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
1639                                 return EMULATE_FAIL;
1640                         break;
1641                 default:
1642                         return EMULATE_FAIL;
1643                 }
1644
1645                 emulated = kvmppc_handle_store(vcpu, val, bytes,
1646                                 is_default_endian);
1647                 if (emulated != EMULATE_DONE)
1648                         break;
1649
1650                 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1651                 vcpu->arch.mmio_vmx_copy_nums--;
1652                 vcpu->arch.mmio_vmx_offset++;
1653         }
1654
1655         return emulated;
1656 }
1657
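/*
 * Re-drive a multi-element VMX MMIO access after userspace has
 * completed the previous KVM_EXIT_MMIO: step past the chunk that just
 * finished and replay the remaining elements as a load or store.
 */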
1658 static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
1659 {
1660         struct kvm_run *run = vcpu->run;
1661         enum emulation_result emulated = EMULATE_FAIL;
1662         int r;
1663
1664         vcpu->arch.paddr_accessed += run->mmio.len;
1665
1666         if (!vcpu->mmio_is_write) {
1667                 emulated = kvmppc_handle_vmx_load(vcpu,
1668                                 vcpu->arch.io_gpr, run->mmio.len, 1);
1669         } else {
1670                 emulated = kvmppc_handle_vmx_store(vcpu,
1671                                 vcpu->arch.io_gpr, run->mmio.len, 1);
1672         }
1673
1674         switch (emulated) {
1675         case EMULATE_DO_MMIO:
1676                 run->exit_reason = KVM_EXIT_MMIO;
1677                 r = RESUME_HOST;
1678                 break;
1679         case EMULATE_FAIL:
1680                 pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
1681                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1682                 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1683                 r = RESUME_HOST;
1684                 break;
1685         default:
1686                 r = RESUME_GUEST;
1687                 break;
1688         }
1689         return r;
1690 }
1691 #endif /* CONFIG_ALTIVEC */
1692
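/*
 * KVM_GET_ONE_REG: ask the core/backend for the register first and
 * handle the AltiVec registers here as a fallback when it returns
 * -EINVAL.  Illustrative userspace sketch (vcpu_fd and the error
 * handling are assumptions, not part of this file):
 *
 *	__u32 vrsave;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_VRSAVE,
 *		.addr = (__u64)(unsigned long)&vrsave,
 *	};
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
 *		err(1, "KVM_GET_ONE_REG");
 */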
1693 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1694 {
1695         int r = 0;
1696         union kvmppc_one_reg val;
1697         int size;
1698
1699         size = one_reg_size(reg->id);
1700         if (size > sizeof(val))
1701                 return -EINVAL;
1702
1703         r = kvmppc_get_one_reg(vcpu, reg->id, &val);
1704         if (r == -EINVAL) {
1705                 r = 0;
1706                 switch (reg->id) {
1707 #ifdef CONFIG_ALTIVEC
1708                 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1709                         if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1710                                 r = -ENXIO;
1711                                 break;
1712                         }
1713                         kvmppc_get_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval);
1714                         break;
1715                 case KVM_REG_PPC_VSCR:
1716                         if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1717                                 r = -ENXIO;
1718                                 break;
1719                         }
1720                         val = get_reg_val(reg->id, kvmppc_get_vscr(vcpu));
1721                         break;
1722                 case KVM_REG_PPC_VRSAVE:
1723                         val = get_reg_val(reg->id, kvmppc_get_vrsave(vcpu));
1724                         break;
1725 #endif /* CONFIG_ALTIVEC */
1726                 default:
1727                         r = -EINVAL;
1728                         break;
1729                 }
1730         }
1731
1732         if (r)
1733                 return r;
1734
1735         if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
1736                 r = -EFAULT;
1737
1738         return r;
1739 }
1740
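/*
 * KVM_SET_ONE_REG counterpart: copy the value in from userspace, try
 * the core/backend, then fall back to the AltiVec registers handled
 * here.
 */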
1741 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1742 {
1743         int r;
1744         union kvmppc_one_reg val;
1745         int size;
1746
1747         size = one_reg_size(reg->id);
1748         if (size > sizeof(val))
1749                 return -EINVAL;
1750
1751         if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
1752                 return -EFAULT;
1753
1754         r = kvmppc_set_one_reg(vcpu, reg->id, &val);
1755         if (r == -EINVAL) {
1756                 r = 0;
1757                 switch (reg->id) {
1758 #ifdef CONFIG_ALTIVEC
1759                 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1760                         if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1761                                 r = -ENXIO;
1762                                 break;
1763                         }
1764                         kvmppc_set_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval);
1765                         break;
1766                 case KVM_REG_PPC_VSCR:
1767                         if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1768                                 r = -ENXIO;
1769                                 break;
1770                         }
1771                         kvmppc_set_vscr(vcpu, set_reg_val(reg->id, val));
1772                         break;
1773                 case KVM_REG_PPC_VRSAVE:
1774                         if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1775                                 r = -ENXIO;
1776                                 break;
1777                         }
1778                         kvmppc_set_vrsave(vcpu, set_reg_val(reg->id, val));
1779                         break;
1780 #endif /* CONFIG_ALTIVEC */
1781                 default:
1782                         r = -EINVAL;
1783                         break;
1784                 }
1785         }
1786
1787         return r;
1788 }
1789
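/*
 * Main entry for KVM_RUN.  Before (re)entering the guest, finish off
 * whatever caused the last exit to userspace: a pending MMIO load,
 * outstanding VSX/VMX element copies, OSI or PAPR hypercall results,
 * or a BookE EPR update.
 */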
1790 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1791 {
1792         struct kvm_run *run = vcpu->run;
1793         int r;
1794
1795         vcpu_load(vcpu);
1796
1797         if (vcpu->mmio_needed) {
1798                 vcpu->mmio_needed = 0;
1799                 if (!vcpu->mmio_is_write)
1800                         kvmppc_complete_mmio_load(vcpu);
1801 #ifdef CONFIG_VSX
1802                 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1803                         vcpu->arch.mmio_vsx_copy_nums--;
1804                         vcpu->arch.mmio_vsx_offset++;
1805                 }
1806
1807                 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1808                         r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
1809                         if (r == RESUME_HOST) {
1810                                 vcpu->mmio_needed = 1;
1811                                 goto out;
1812                         }
1813                 }
1814 #endif
1815 #ifdef CONFIG_ALTIVEC
1816                 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1817                         vcpu->arch.mmio_vmx_copy_nums--;
1818                         vcpu->arch.mmio_vmx_offset++;
1819                 }
1820
1821                 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1822                         r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
1823                         if (r == RESUME_HOST) {
1824                                 vcpu->mmio_needed = 1;
1825                                 goto out;
1826                         }
1827                 }
1828 #endif
1829         } else if (vcpu->arch.osi_needed) {
1830                 u64 *gprs = run->osi.gprs;
1831                 int i;
1832
1833                 for (i = 0; i < 32; i++)
1834                         kvmppc_set_gpr(vcpu, i, gprs[i]);
1835                 vcpu->arch.osi_needed = 0;
1836         } else if (vcpu->arch.hcall_needed) {
1837                 int i;
1838
1839                 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
1840                 for (i = 0; i < 9; ++i)
1841                         kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
1842                 vcpu->arch.hcall_needed = 0;
1843 #ifdef CONFIG_BOOKE
1844         } else if (vcpu->arch.epr_needed) {
1845                 kvmppc_set_epr(vcpu, run->epr.epr);
1846                 vcpu->arch.epr_needed = 0;
1847 #endif
1848         }
1849
1850         kvm_sigset_activate(vcpu);
1851
1852         if (!vcpu->wants_to_run)
1853                 r = -EINTR;
1854         else
1855                 r = kvmppc_vcpu_run(vcpu);
1856
1857         kvm_sigset_deactivate(vcpu);
1858
1859 #ifdef CONFIG_ALTIVEC
1860 out:
1861 #endif
1862
1863         /*
1864          * We're already returning to userspace; don't pass the
1865          * RESUME_HOST flag along.
1866          */
1867         if (r > 0)
1868                 r = 0;
1869
1870         vcpu_put(vcpu);
1871         return r;
1872 }
1873
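/*
 * KVM_INTERRUPT: queue (or, for KVM_INTERRUPT_UNSET, dequeue) an
 * external interrupt for the vcpu.  Illustrative userspace sketch
 * (vcpu_fd and vector are assumptions):
 *
 *	struct kvm_interrupt irq = { .irq = vector };
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
 */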
1874 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1875 {
1876         if (irq->irq == KVM_INTERRUPT_UNSET) {
1877                 kvmppc_core_dequeue_external(vcpu);
1878                 return 0;
1879         }
1880
1881         kvmppc_core_queue_external(vcpu, irq);
1882
1883         kvm_vcpu_kick(vcpu);
1884
1885         return 0;
1886 }
1887
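/*
 * Per-vcpu KVM_ENABLE_CAP.  Illustrative userspace sketch enabling OSI
 * hypercall exits (vcpu_fd is an assumption):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_OSI };
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		err(1, "KVM_ENABLE_CAP");
 */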
1888 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1889                                      struct kvm_enable_cap *cap)
1890 {
1891         int r;
1892
1893         if (cap->flags)
1894                 return -EINVAL;
1895
1896         switch (cap->cap) {
1897         case KVM_CAP_PPC_OSI:
1898                 r = 0;
1899                 vcpu->arch.osi_enabled = true;
1900                 break;
1901         case KVM_CAP_PPC_PAPR:
1902                 r = 0;
1903                 vcpu->arch.papr_enabled = true;
1904                 break;
1905         case KVM_CAP_PPC_EPR:
1906                 r = 0;
1907                 if (cap->args[0])
1908                         vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
1909                 else
1910                         vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
1911                 break;
1912 #ifdef CONFIG_BOOKE
1913         case KVM_CAP_PPC_BOOKE_WATCHDOG:
1914                 r = 0;
1915                 vcpu->arch.watchdog_enabled = true;
1916                 break;
1917 #endif
1918 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
1919         case KVM_CAP_SW_TLB: {
1920                 struct kvm_config_tlb cfg;
1921                 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
1922
1923                 r = -EFAULT;
1924                 if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
1925                         break;
1926
1927                 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
1928                 break;
1929         }
1930 #endif
1931 #ifdef CONFIG_KVM_MPIC
1932         case KVM_CAP_IRQ_MPIC: {
1933                 CLASS(fd, f)(cap->args[0]);
1934                 struct kvm_device *dev;
1935
1936                 r = -EBADF;
1937                 if (fd_empty(f))
1938                         break;
1939
1940                 r = -EPERM;
1941                 dev = kvm_device_from_filp(fd_file(f));
1942                 if (dev)
1943                         r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
1944
1945                 break;
1946         }
1947 #endif
1948 #ifdef CONFIG_KVM_XICS
1949         case KVM_CAP_IRQ_XICS: {
1950                 CLASS(fd, f)(cap->args[0]);
1951                 struct kvm_device *dev;
1952
1953                 r = -EBADF;
1954                 if (fd_empty(f))
1955                         break;
1956
1957                 r = -EPERM;
1958                 dev = kvm_device_from_filp(fd_file(f));
1959                 if (dev) {
1960                         if (xics_on_xive())
1961                                 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
1962                         else
1963                                 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
1964                 }
1965                 break;
1966         }
1967 #endif /* CONFIG_KVM_XICS */
1968 #ifdef CONFIG_KVM_XIVE
1969         case KVM_CAP_PPC_IRQ_XIVE: {
1970                 CLASS(fd, f)(cap->args[0]);
1971                 struct kvm_device *dev;
1972
1973                 r = -EBADF;
1974                 if (fd_empty(f))
1975                         break;
1976
1977                 r = -ENXIO;
1978                 if (!xive_enabled())
1979                         break;
1980
1981                 r = -EPERM;
1982                 dev = kvm_device_from_filp(fd_file(f));
1983                 if (dev)
1984                         r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
1985                                                             cap->args[1]);
1986                 break;
1987         }
1988 #endif /* CONFIG_KVM_XIVE */
1989 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1990         case KVM_CAP_PPC_FWNMI:
1991                 r = -EINVAL;
1992                 if (!is_kvmppc_hv_enabled(vcpu->kvm))
1993                         break;
1994                 r = 0;
1995                 vcpu->kvm->arch.fwnmi_enabled = true;
1996                 break;
1997 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
1998         default:
1999                 r = -EINVAL;
2000                 break;
2001         }
2002
2003         if (!r)
2004                 r = kvmppc_sanity_check(vcpu);
2005
2006         return r;
2007 }
2008
2009 bool kvm_arch_intc_initialized(struct kvm *kvm)
2010 {
2011 #ifdef CONFIG_KVM_MPIC
2012         if (kvm->arch.mpic)
2013                 return true;
2014 #endif
2015 #ifdef CONFIG_KVM_XICS
2016         if (kvm->arch.xics || kvm->arch.xive)
2017                 return true;
2018 #endif
2019         return false;
2020 }
2021
2022 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2023                                     struct kvm_mp_state *mp_state)
2024 {
2025         return -EINVAL;
2026 }
2027
2028 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2029                                     struct kvm_mp_state *mp_state)
2030 {
2031         return -EINVAL;
2032 }
2033
2034 long kvm_arch_vcpu_async_ioctl(struct file *filp,
2035                                unsigned int ioctl, unsigned long arg)
2036 {
2037         struct kvm_vcpu *vcpu = filp->private_data;
2038         void __user *argp = (void __user *)arg;
2039
2040         if (ioctl == KVM_INTERRUPT) {
2041                 struct kvm_interrupt irq;
2042                 if (copy_from_user(&irq, argp, sizeof(irq)))
2043                         return -EFAULT;
2044                 return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2045         }
2046         return -ENOIOCTLCMD;
2047 }
2048
2049 long kvm_arch_vcpu_ioctl(struct file *filp,
2050                          unsigned int ioctl, unsigned long arg)
2051 {
2052         struct kvm_vcpu *vcpu = filp->private_data;
2053         void __user *argp = (void __user *)arg;
2054         long r;
2055
2056         switch (ioctl) {
2057         case KVM_ENABLE_CAP:
2058         {
2059                 struct kvm_enable_cap cap;
2060                 r = -EFAULT;
2061                 if (copy_from_user(&cap, argp, sizeof(cap)))
2062                         goto out;
2063                 vcpu_load(vcpu);
2064                 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2065                 vcpu_put(vcpu);
2066                 break;
2067         }
2068
2069         case KVM_SET_ONE_REG:
2070         case KVM_GET_ONE_REG:
2071         {
2072                 struct kvm_one_reg reg;
2073                 r = -EFAULT;
2074                 if (copy_from_user(&reg, argp, sizeof(reg)))
2075                         goto out;
2076                 if (ioctl == KVM_SET_ONE_REG)
2077                         r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
2078                 else
2079                         r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
2080                 break;
2081         }
2082
2083 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
2084         case KVM_DIRTY_TLB: {
2085                 struct kvm_dirty_tlb dirty;
2086                 r = -EFAULT;
2087                 if (copy_from_user(&dirty, argp, sizeof(dirty)))
2088                         goto out;
2089                 vcpu_load(vcpu);
2090                 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
2091                 vcpu_put(vcpu);
2092                 break;
2093         }
2094 #endif
2095         default:
2096                 r = -EINVAL;
2097         }
2098
2099 out:
2100         return r;
2101 }
2102
2103 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2104 {
2105         return VM_FAULT_SIGBUS;
2106 }
2107
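/*
 * KVM_PPC_GET_PVINFO: report the four-instruction hypercall sequence a
 * paravirtualized guest should patch in.  On BookE HV this is a single
 * "sc 1" padded with nops; otherwise the magic value is loaded into r0
 * before the sc, as laid out in the comment below.
 */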
2108 static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
2109 {
2110         u32 inst_nop = 0x60000000;
2111 #ifdef CONFIG_KVM_BOOKE_HV
2112         u32 inst_sc1 = 0x44000022;
2113         pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
2114         pvinfo->hcall[1] = cpu_to_be32(inst_nop);
2115         pvinfo->hcall[2] = cpu_to_be32(inst_nop);
2116         pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2117 #else
2118         u32 inst_lis = 0x3c000000;
2119         u32 inst_ori = 0x60000000;
2120         u32 inst_sc = 0x44000002;
2121         u32 inst_imm_mask = 0xffff;
2122
2123         /*
2124          * The hypercall to get into KVM from within guest context is as
2125          * follows:
2126          *
2127          *    lis r0, KVM_SC_MAGIC_R0@h
2128          *    ori r0, r0, KVM_SC_MAGIC_R0@l
2129          *    sc
2130          *    nop
2131          */
2132         pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
2133         pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
2134         pvinfo->hcall[2] = cpu_to_be32(inst_sc);
2135         pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2136 #endif
2137
2138         pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
2139
2140         return 0;
2141 }
2142
2143 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
2144 {
2145         int ret = 0;
2146
2147 #ifdef CONFIG_KVM_MPIC
2148         ret = ret || (kvm->arch.mpic != NULL);
2149 #endif
2150 #ifdef CONFIG_KVM_XICS
2151         ret = ret || (kvm->arch.xics != NULL);
2152         ret = ret || (kvm->arch.xive != NULL);
2153 #endif
2154         smp_rmb();
2155         return ret;
2156 }
2157
2158 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
2159                           bool line_status)
2160 {
2161         if (!kvm_arch_irqchip_in_kernel(kvm))
2162                 return -ENXIO;
2163
2164         irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2165                                         irq_event->irq, irq_event->level,
2166                                         line_status);
2167         return 0;
2168 }
2169
2170
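/*
 * VM-wide KVM_ENABLE_CAP: hcall enablement, SMT mode, nested HV,
 * secure guests and DAWR1 are negotiated here, mostly by delegating
 * to the active kvm_ops backend.
 */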
2171 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
2172                             struct kvm_enable_cap *cap)
2173 {
2174         int r;
2175
2176         if (cap->flags)
2177                 return -EINVAL;
2178
2179         switch (cap->cap) {
2180 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2181         case KVM_CAP_PPC_ENABLE_HCALL: {
2182                 unsigned long hcall = cap->args[0];
2183
2184                 r = -EINVAL;
2185                 if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
2186                     cap->args[1] > 1)
2187                         break;
2188                 if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
2189                         break;
2190                 if (cap->args[1])
2191                         set_bit(hcall / 4, kvm->arch.enabled_hcalls);
2192                 else
2193                         clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
2194                 r = 0;
2195                 break;
2196         }
2197         case KVM_CAP_PPC_SMT: {
2198                 unsigned long mode = cap->args[0];
2199                 unsigned long flags = cap->args[1];
2200
2201                 r = -EINVAL;
2202                 if (kvm->arch.kvm_ops->set_smt_mode)
2203                         r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
2204                 break;
2205         }
2206
2207         case KVM_CAP_PPC_NESTED_HV:
2208                 r = -EINVAL;
2209                 if (!is_kvmppc_hv_enabled(kvm) ||
2210                     !kvm->arch.kvm_ops->enable_nested)
2211                         break;
2212                 r = kvm->arch.kvm_ops->enable_nested(kvm);
2213                 break;
2214 #endif
2215 #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
2216         case KVM_CAP_PPC_SECURE_GUEST:
2217                 r = -EINVAL;
2218                 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
2219                         break;
2220                 r = kvm->arch.kvm_ops->enable_svm(kvm);
2221                 break;
2222         case KVM_CAP_PPC_DAWR1:
2223                 r = -EINVAL;
2224                 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
2225                         break;
2226                 r = kvm->arch.kvm_ops->enable_dawr1(kvm);
2227                 break;
2228 #endif
2229         default:
2230                 r = -EINVAL;
2231                 break;
2232         }
2233
2234         return r;
2235 }
2236
2237 #ifdef CONFIG_PPC_BOOK3S_64
2238 /*
2239  * These functions check whether the underlying hardware is safe
2240  * against attacks based on observing the effects of speculatively
2241  * executed instructions, and whether it supplies instructions for
2242  * use in workarounds.  The information comes from firmware, either
2243  * via the device tree on powernv platforms or from an hcall on
2244  * pseries platforms.
2245  */
2246 #ifdef CONFIG_PPC_PSERIES
2247 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2248 {
2249         struct h_cpu_char_result c;
2250         unsigned long rc;
2251
2252         if (!machine_is(pseries))
2253                 return -ENOTTY;
2254
2255         rc = plpar_get_cpu_characteristics(&c);
2256         if (rc == H_SUCCESS) {
2257                 cp->character = c.character;
2258                 cp->behaviour = c.behaviour;
2259                 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2260                         KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2261                         KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2262                         KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2263                         KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2264                         KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
2265                         KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
2266                         KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2267                         KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2268                 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2269                         KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2270                         KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2271                         KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2272         }
2273         return 0;
2274 }
2275 #else
2276 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2277 {
2278         return -ENOTTY;
2279 }
2280 #endif
2281
2282 static inline bool have_fw_feat(struct device_node *fw_features,
2283                                 const char *state, const char *name)
2284 {
2285         struct device_node *np;
2286         bool r = false;
2287
2288         np = of_get_child_by_name(fw_features, name);
2289         if (np) {
2290                 r = of_property_read_bool(np, state);
2291                 of_node_put(np);
2292         }
2293         return r;
2294 }
2295
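/*
 * Fill in the speculation-control characteristics reported by
 * KVM_PPC_GET_CPU_CHAR: try the pseries hcall first, then fall back to
 * the powernv "ibm,opal"/"fw-features" device tree nodes.  Illustrative
 * userspace sketch (vm_fd is an assumption):
 *
 *	struct kvm_ppc_cpu_char cc;
 *	if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) == 0)
 *		printf("character %#llx\n", (unsigned long long)cc.character);
 */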
2296 static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2297 {
2298         struct device_node *np, *fw_features;
2299         int r;
2300
2301         memset(cp, 0, sizeof(*cp));
2302         r = pseries_get_cpu_char(cp);
2303         if (r != -ENOTTY)
2304                 return r;
2305
2306         np = of_find_node_by_name(NULL, "ibm,opal");
2307         if (np) {
2308                 fw_features = of_get_child_by_name(np, "fw-features");
2309                 of_node_put(np);
2310                 if (!fw_features)
2311                         return 0;
2312                 if (have_fw_feat(fw_features, "enabled",
2313                                  "inst-spec-barrier-ori31,31,0"))
2314                         cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
2315                 if (have_fw_feat(fw_features, "enabled",
2316                                  "fw-bcctrl-serialized"))
2317                         cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
2318                 if (have_fw_feat(fw_features, "enabled",
2319                                  "inst-l1d-flush-ori30,30,0"))
2320                         cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
2321                 if (have_fw_feat(fw_features, "enabled",
2322                                  "inst-l1d-flush-trig2"))
2323                         cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
2324                 if (have_fw_feat(fw_features, "enabled",
2325                                  "fw-l1d-thread-split"))
2326                         cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
2327                 if (have_fw_feat(fw_features, "enabled",
2328                                  "fw-count-cache-disabled"))
2329                         cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
2330                 if (have_fw_feat(fw_features, "enabled",
2331                                  "fw-count-cache-flush-bcctr2,0,0"))
2332                         cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2333                 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2334                         KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2335                         KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2336                         KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2337                         KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2338                         KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2339                         KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2340
2341                 if (have_fw_feat(fw_features, "enabled",
2342                                  "speculation-policy-favor-security"))
2343                         cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
2344                 if (!have_fw_feat(fw_features, "disabled",
2345                                   "needs-l1d-flush-msr-pr-0-to-1"))
2346                         cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
2347                 if (!have_fw_feat(fw_features, "disabled",
2348                                   "needs-spec-barrier-for-bound-checks"))
2349                         cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
2350                 if (have_fw_feat(fw_features, "enabled",
2351                                  "needs-count-cache-flush-on-context-switch"))
2352                         cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2353                 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2354                         KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2355                         KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2356                         KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2357
2358                 of_node_put(fw_features);
2359         }
2360
2361         return 0;
2362 }
2363 #endif
2364
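/*
 * Dispatcher for powerpc-specific VM ioctls.  Anything not handled
 * here is passed to the backend's arch_vm_ioctl on Book3S-64, or
 * rejected with -ENOTTY elsewhere.
 */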
2365 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
2366 {
2367         struct kvm *kvm __maybe_unused = filp->private_data;
2368         void __user *argp = (void __user *)arg;
2369         int r;
2370
2371         switch (ioctl) {
2372         case KVM_PPC_GET_PVINFO: {
2373                 struct kvm_ppc_pvinfo pvinfo;
2374                 memset(&pvinfo, 0, sizeof(pvinfo));
2375                 r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
2376                 if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
2377                         r = -EFAULT;
2378                         goto out;
2379                 }
2380
2381                 break;
2382         }
2383 #ifdef CONFIG_SPAPR_TCE_IOMMU
2384         case KVM_CREATE_SPAPR_TCE_64: {
2385                 struct kvm_create_spapr_tce_64 create_tce_64;
2386
2387                 r = -EFAULT;
2388                 if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
2389                         goto out;
2390                 if (create_tce_64.flags) {
2391                         r = -EINVAL;
2392                         goto out;
2393                 }
2394                 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2395                 goto out;
2396         }
2397         case KVM_CREATE_SPAPR_TCE: {
2398                 struct kvm_create_spapr_tce create_tce;
2399                 struct kvm_create_spapr_tce_64 create_tce_64;
2400
2401                 r = -EFAULT;
2402                 if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
2403                         goto out;
2404
2405                 create_tce_64.liobn = create_tce.liobn;
2406                 create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
2407                 create_tce_64.offset = 0;
2408                 create_tce_64.size = create_tce.window_size >>
2409                                 IOMMU_PAGE_SHIFT_4K;
2410                 create_tce_64.flags = 0;
2411                 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2412                 goto out;
2413         }
2414 #endif
2415 #ifdef CONFIG_PPC_BOOK3S_64
2416         case KVM_PPC_GET_SMMU_INFO: {
2417                 struct kvm_ppc_smmu_info info;
2418                 struct kvm *kvm = filp->private_data;
2419
2420                 memset(&info, 0, sizeof(info));
2421                 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
2422                 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2423                         r = -EFAULT;
2424                 break;
2425         }
2426         case KVM_PPC_RTAS_DEFINE_TOKEN: {
2427                 struct kvm *kvm = filp->private_data;
2428
2429                 r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
2430                 break;
2431         }
2432         case KVM_PPC_CONFIGURE_V3_MMU: {
2433                 struct kvm *kvm = filp->private_data;
2434                 struct kvm_ppc_mmuv3_cfg cfg;
2435
2436                 r = -EINVAL;
2437                 if (!kvm->arch.kvm_ops->configure_mmu)
2438                         goto out;
2439                 r = -EFAULT;
2440                 if (copy_from_user(&cfg, argp, sizeof(cfg)))
2441                         goto out;
2442                 r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
2443                 break;
2444         }
2445         case KVM_PPC_GET_RMMU_INFO: {
2446                 struct kvm *kvm = filp->private_data;
2447                 struct kvm_ppc_rmmu_info info;
2448
2449                 r = -EINVAL;
2450                 if (!kvm->arch.kvm_ops->get_rmmu_info)
2451                         goto out;
2452                 r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
2453                 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2454                         r = -EFAULT;
2455                 break;
2456         }
2457         case KVM_PPC_GET_CPU_CHAR: {
2458                 struct kvm_ppc_cpu_char cpuchar;
2459
2460                 r = kvmppc_get_cpu_char(&cpuchar);
2461                 if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
2462                         r = -EFAULT;
2463                 break;
2464         }
2465         case KVM_PPC_SVM_OFF: {
2466                 struct kvm *kvm = filp->private_data;
2467
2468                 r = 0;
2469                 if (!kvm->arch.kvm_ops->svm_off)
2470                         goto out;
2471
2472                 r = kvm->arch.kvm_ops->svm_off(kvm);
2473                 break;
2474         }
2475         default: {
2476                 struct kvm *kvm = filp->private_data;
2477                 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
2478         }
2479 #else /* CONFIG_PPC_BOOK3S_64 */
2480         default:
2481                 r = -ENOTTY;
2482 #endif
2483         }
2484 out:
2485         return r;
2486 }
2487
2488 static DEFINE_IDA(lpid_inuse);
2489 static unsigned long nr_lpids;
2490
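/*
 * Guest LPID (logical partition ID) allocation.  Illustrative
 * in-kernel usage sketch (error handling elided for brevity):
 *
 *	long lpid = kvmppc_alloc_lpid();
 *	if (lpid >= 0) {
 *		... use lpid for the new guest ...
 *		kvmppc_free_lpid(lpid);
 *	}
 */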
2491 long kvmppc_alloc_lpid(void)
2492 {
2493         int lpid;
2494
2495         /* The host LPID must always be 0 (allocation starts at 1) */
2496         lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL);
2497         if (lpid < 0) {
2498                 if (lpid == -ENOMEM)
2499                         pr_err("%s: Out of memory\n", __func__);
2500                 else
2501                         pr_err("%s: No LPIDs free\n", __func__);
2502                 return -ENOMEM;
2503         }
2504
2505         return lpid;
2506 }
2507 EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
2508
2509 void kvmppc_free_lpid(long lpid)
2510 {
2511         ida_free(&lpid_inuse, lpid);
2512 }
2513 EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
2514
2515 /* nr_lpids_param includes the host LPID */
2516 void kvmppc_init_lpid(unsigned long nr_lpids_param)
2517 {
2518         nr_lpids = nr_lpids_param;
2519 }
2520 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
2521
2522 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);
2523
2524 void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
2525 {
2526         if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs)
2527                 vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
2528 }
2529
2530 void kvm_arch_create_vm_debugfs(struct kvm *kvm)
2531 {
2532         if (kvm->arch.kvm_ops->create_vm_debugfs)
2533                 kvm->arch.kvm_ops->create_vm_debugfs(kvm);
2534 }