/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 */
#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__
/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#endif
/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to support
 * software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00
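/*
 * Illustrative sketch (an assumption about the emulator, not part of this
 * header): the instruction emulator can match this opcode and convert it
 * into a debug exit to user space, roughly:
 *
 *	if (inst == KVMPPC_INST_SW_BREAKPOINT) {
 *		run->exit_reason = KVM_EXIT_DEBUG;
 *		run->debug.arch.address = kvmppc_get_pc(vcpu);
 *		return EMULATE_EXIT_USER;
 *	}
 */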
enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
	EMULATE_FAIL,         /* can't emulate this instruction */
	EMULATE_AGAIN,        /* something went wrong. go again */
	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};
enum instruction_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};
extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
				      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
						unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp);
extern void kvmppc_free_hpt(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			    struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
					  struct kvm_create_spapr_tce *args);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *free,
				     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *slot,
				      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
					     struct kvm_memory_slot *memslot,
					     struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
					     struct kvm_userspace_memory_region *mem,
					     const struct kvm_memory_slot *old);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);
union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     struct kvm_userspace_memory_region *mem);
	void (*commit_memory_region)(struct kvm *kvm,
				     struct kvm_userspace_memory_region *mem,
				     const struct kvm_memory_slot *old);
	int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			       unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
	void (*free_memslot)(struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont);
	int (*create_memslot)(struct kvm_memory_slot *slot,
			      unsigned long npages);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
};
extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;
static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
					enum instruction_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.last_inst) :
				vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}
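/*
 * Illustrative usage (an assumed caller, not part of this header): an
 * emulation path typically fetches the last guest instruction and backs
 * out if the fetch itself needs to be retried:
 *
 *	u32 inst;
 *	int ret = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
 *	if (ret != EMULATE_DONE)
 *		return ret;
 *	... decode and emulate 'inst' ...
 */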
static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}
/*
 * Cuts out inst bits with ordering according to spec.
 * That means the leftmost bit is zero. All given bits are included.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}

/*
 * Replaces inst bits with ordering according to spec.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
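/*
 * Illustrative example (an assumption, not from the original header): bit
 * numbers follow the 64-bit big-endian convention, with bit 0 as the
 * leftmost bit. A 32-bit instruction sitting in the low word of a u64 is
 * therefore addressed with a +32 offset; extracting the RT field
 * (instruction bits 6-10) and rewriting it would look like:
 *
 *	u32 rt = kvmppc_get_field(inst, 6 + 32, 10 + 32);
 *	inst = kvmppc_set_field(inst, 6 + 32, 10 + 32, new_rt);
 */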
#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
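/*
 * Illustrative usage (an assumed ONE_REG handler, not part of this
 * header): the access size is encoded in the register ID, so a handler
 * can pack and unpack values without caring about the width itself:
 *
 *	case KVM_REG_PPC_VRSAVE:
 *		*val = get_reg_val(id, vcpu->arch.vrsave);
 *		break;
 *
 * and in the set direction:
 *
 *	vcpu->arch.vrsave = set_reg_val(id, *val);
 */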
int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca[cpu].kvm_hstate.xics_phys = addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
	paca[cpu].kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);
#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)		{ return false; }

#endif
#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
#else
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu,
					 unsigned long server)
	{ return -EINVAL; }
static inline int kvm_vm_ioctl_xics_irq(struct kvm *kvm,
					struct kvm_irq_level *args)
	{ return -ENOTTY; }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif
static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}
#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
		struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
		struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */
int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);
static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}
/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness. So expose helpers to all of them.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}
#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
		vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif
SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->msr = cpu_to_be64(val);
	else
		vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)
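/*
 * Illustrative note (an assumption, not from the original header): each
 * SHARED_WRAPPER(reg, size) invocation above expands to a pair of
 * accessors. For example, SHARED_WRAPPER(sprg4, 64) generates roughly:
 *
 *	static inline u64 kvmppc_get_sprg4(struct kvm_vcpu *vcpu)
 *	{
 *		if (kvmppc_shared_big_endian(vcpu))
 *			return be64_to_cpu(vcpu->arch.shared->sprg4);
 *		else
 *			return le64_to_cpu(vcpu->arch.shared->sprg4);
 *	}
 *
 * plus the matching kvmppc_set_sprg4(), so callers never touch the
 * endianness of the shared page directly.
 */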
static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}
/*
 * Please call after prepare_to_enter. This function puts the lazy ee and irq
 * disabled tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	local_paca->soft_enabled = 1;
#endif
}
static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
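/*
 * Illustrative example (an assumption, not from the original header): for
 * an indexed-form load such as lwzx rt, ra, rb the effective address is
 * (ra ? GPR[ra] : 0) + GPR[rb], truncated to 32 bits when the guest MSR
 * indicates 32-bit mode:
 *
 *	ulong ea = kvmppc_get_ea_indexed(vcpu, get_ra(inst), get_rb(inst));
 *
 * where get_ra()/get_rb() stand for whatever instruction decoding the
 * caller uses.
 */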
extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */