/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright SUSE Linux Products GmbH 2009
 */

#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/guest-state-buffer.h>

struct kvmppc_sid_map {
	u64 guest_vsid;
	u64 guest_esid;
	u64 host_vsid;
	bool valid	: 1;
};

#define SID_MAP_BITS	9
#define SID_MAP_NUM	(1 << SID_MAP_BITS)
#define SID_MAP_MASK	(SID_MAP_NUM - 1)

#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS	1
#else
#define SID_CONTEXTS	128
#define VSID_POOL_SIZE	(SID_CONTEXTS * 16)
#endif

struct hpte_cache {
	struct hlist_node list_pte;
	struct hlist_node list_pte_long;
	struct hlist_node list_vpte;
	struct hlist_node list_vpte_long;
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_node list_vpte_64k;
#endif
	struct rcu_head rcu_head;
	u64 host_vpn;
	u64 pfn;
	ulong slot;
	struct kvmppc_pte pte;
	int pagesize;
};

/*
 * Struct for a virtual core.
 * Note: entry_exit_map combines a bitmap of threads that have entered
 * in the bottom 8 bits and a bitmap of threads that have exited in the
 * next 8 bits. This is so that we can atomically set the entry bit
 * iff the exit map is 0 without taking a lock.
 */
struct kvmppc_vcore {
	int entry_exit_map;
	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
	struct list_head preempt_list;
	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
	u64 stolen_tb;
	u64 preempt_tb;
	struct kvm_vcpu *runner;
	u64 tb_offset;		/* guest timebase - host timebase */
	u64 tb_offset_applied;	/* timebase offset currently in force */
	ulong lpcr;
	u32 arch_compat;
	ulong dpdes;		/* doorbell state (POWER8) */
	ulong vtb;		/* virtual timebase */
	ulong conferring_threads;
	unsigned int halt_poll_ns;
	atomic_t online_count;
};
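
/*
 * Illustrative sketch (not part of the original header): how the combined
 * entry_exit_map lets a thread set its entry bit atomically iff the exit
 * map is still 0, without taking a lock. The helper name is hypothetical;
 * the loop assumes the kernel's cmpxchg() on an int.
 */
static inline bool kvmppc_vcore_try_enter_sketch(struct kvmppc_vcore *vc,
						 int thread)
{
	int map, new_map;

	do {
		map = vc->entry_exit_map;
		if (map >> 8)		/* some thread has already exited */
			return false;
		new_map = map | (1 << thread);	/* entry bits: low 8 bits */
	} while (cmpxchg(&vc->entry_exit_map, map, new_map) != map);
	return true;
}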

struct kvmppc_vcpu_book3s {
	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
	struct kvmppc_bat ibat[8];
	struct kvmppc_bat dbat[8];
#ifdef CONFIG_PPC_BOOK3S_32
	u32 vsid_pool[VSID_POOL_SIZE];
#else
	u64 proto_vsid_first;
#endif
	int context_id[SID_CONTEXTS];

	bool hior_explicit;	/* HIOR is set by ioctl, not PVR */

	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
#endif
	int hpte_cache_count;
};

#define VSID_REAL	0x07ffffffffc00000ULL
#define VSID_BAT	0x07ffffffffb00000ULL
#define VSID_64K	0x0800000000000000ULL
#define VSID_1T		0x1000000000000000ULL
#define VSID_REAL_DR	0x2000000000000000ULL
#define VSID_REAL_IR	0x4000000000000000ULL
#define VSID_PR		0x8000000000000000ULL

extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool iswrite);
extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
			unsigned long addr, unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
			unsigned long slb_v, unsigned long valid);
extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
			unsigned long gpa, gva_t ea, int is_store);

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);

extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
			unsigned long ea, unsigned long dsisr);
extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
			gva_t eaddr, void *to, void *from,
			unsigned long n);
extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
			void *to, unsigned long n);
extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
			void *from, unsigned long n);
extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, u64 root,
			u64 *pte_ret_p);
extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, u64 table,
			int table_index, u64 *pte_ret_p);
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data, bool iswrite);
extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
			unsigned int pshift, u64 lpid);
extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
			unsigned int shift,
			const struct kvm_memory_slot *memslot,
			u64 lpid);
extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
			bool writing, unsigned long gpa,
			u64 lpid);
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
			unsigned long gpa,
			struct kvm_memory_slot *memslot,
			bool writing, bool kvm_ro,
			pte_t *inserted_pte, unsigned int *levelp);
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
			u64 lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_radix_flush_memslot(struct kvm *kvm,
			const struct kvm_memory_slot *memslot);
extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);

/* XXX remove this export when load_last_inst() is generic */
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
			unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
			bool writing, bool *writable, struct page **page);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
			unsigned long gfn, unsigned long psize);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
			unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
			unsigned long gpa, bool dirty);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel,
			pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
			struct kvm_memory_slot *memslot,
			unsigned long *map);
extern unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm,
			unsigned long lpcr);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
			unsigned long mask);
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);

extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
extern int kvmppc_hcall_impl_pr(unsigned long cmd);
extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);

long kvmppc_read_intr(void);
void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
#else
static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif

extern unsigned long nested_capabilities;
long kvmhv_nested_init(void);
void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
void kvmhv_flush_lpid(u64 lpid);
void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
			unsigned long type, unsigned long pg_sizes,
			unsigned long start, unsigned long end);
int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
			u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
			struct hv_guest_state *hr);
long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);

void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

extern struct static_key_false __kvmhv_is_nestedv2;

static inline bool kvmhv_is_nestedv2(void)
{
	return static_branch_unlikely(&__kvmhv_is_nestedv2);
}

static inline bool kvmhv_is_nestedv1(void)
{
	return !static_branch_likely(&__kvmhv_is_nestedv2);
}

#else
static inline bool kvmhv_is_nestedv2(void) { return false; }
static inline bool kvmhv_is_nestedv1(void) { return false; }
#endif

int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden);
int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden);

static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu,
					       struct pt_regs *regs)
{
	if (kvmhv_is_nestedv2())
		return __kvmhv_nestedv2_reload_ptregs(vcpu, regs);
	return 0;
}

static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
						   struct pt_regs *regs)
{
	if (kvmhv_is_nestedv2())
		return __kvmhv_nestedv2_mark_dirty_ptregs(vcpu, regs);
	return 0;
}

static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
{
	if (kvmhv_is_nestedv2())
		return __kvmhv_nestedv2_mark_dirty(vcpu, iden);
	return 0;
}

static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
{
	if (kvmhv_is_nestedv2())
		return __kvmhv_nestedv2_cached_reload(vcpu, iden);
	return 0;
}
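
/*
 * Usage note (not in the original header): the register accessors below
 * pair a cached reload before reads with a mark-dirty after writes, e.g.
 *
 *	kvmppc_set_gpr(vcpu, 3, val);	// marks KVMPPC_GSID_GPR(3) dirty
 *	r = kvmppc_get_gpr(vcpu, 4);	// reloads KVMPPC_GSID_GPR(4) if stale
 *
 * When kvmhv_is_nestedv2() is false, both wrappers return 0 without
 * touching the guest-state buffer, so the accessors reduce to plain
 * field accesses.
 */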

extern int kvm_irq_bypass;

static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.book3s;
}

/* Also add subarch specific defines */

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.regs.gpr[num] = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(num));
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_GPR(num)) < 0);
	return vcpu->arch.regs.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.regs.ccr = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CR) < 0);
	return vcpu->arch.regs.ccr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.xer = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
}

static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_XER) < 0);
	return vcpu->arch.regs.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.ctr = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CTR) < 0);
	return vcpu->arch.regs.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.link = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_LR) < 0);
	return vcpu->arch.regs.link;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.nip = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_NIA) < 0);
	return vcpu->arch.regs.nip;
}

static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);

static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault_dar;
}

static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
	return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
}

static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
{
	vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
}

static inline u64 kvmppc_get_fpscr(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_FPSCR) < 0);
	return vcpu->arch.fp.fpscr;
}

static inline void kvmppc_set_fpscr(struct kvm_vcpu *vcpu, u64 val)
{
	vcpu->arch.fp.fpscr = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_FPSCR);
}

static inline u64 kvmppc_get_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
	return vcpu->arch.fp.fpr[i][j];
}

static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j,
				      u64 val)
{
	vcpu->arch.fp.fpr[i][j] = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
}

#ifdef CONFIG_ALTIVEC
static inline void kvmppc_get_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 *v)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(32 + i)) < 0);
	*v = vcpu->arch.vr.vr[i];
}

static inline void kvmppc_set_vsx_vr(struct kvm_vcpu *vcpu, int i,
				     vector128 *val)
{
	vcpu->arch.vr.vr[i] = *val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(32 + i));
}

static inline u32 kvmppc_get_vscr(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSCR) < 0);
	return vcpu->arch.vr.vscr.u[3];
}

static inline void kvmppc_set_vscr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.vr.vscr.u[3] = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSCR);
}
#endif

#define KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden)		\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	vcpu->arch.reg = val;						\
	kvmhv_nestedv2_mark_dirty(vcpu, iden);				\
}

#define KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden)		\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0);		\
	return vcpu->arch.reg;						\
}

#define KVMPPC_BOOK3S_VCPU_ACCESSOR(reg, size, iden)			\
	KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden)		\
	KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden)		\

KVMPPC_BOOK3S_VCPU_ACCESSOR(pid, 32, KVMPPC_GSID_PIDR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64, KVMPPC_GSID_TAR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbhr, 64, KVMPPC_GSID_EBBHR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbrr, 64, KVMPPC_GSID_EBBRR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(bescr, 64, KVMPPC_GSID_BESCR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(ic, 64, KVMPPC_GSID_IC)
KVMPPC_BOOK3S_VCPU_ACCESSOR(vrsave, 64, KVMPPC_GSID_VRSAVE)
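
/*
 * For reference (expansion sketch, not in the original header):
 * KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64, KVMPPC_GSID_TAR) defines
 *
 *	static inline void kvmppc_set_tar(struct kvm_vcpu *vcpu, u64 val)
 *	{
 *		vcpu->arch.tar = val;
 *		kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_TAR);
 *	}
 *
 *	static inline u64 kvmppc_get_tar(struct kvm_vcpu *vcpu)
 *	{
 *		WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TAR) < 0);
 *		return vcpu->arch.tar;
 *	}
 */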

#define KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden)		\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	vcpu->arch.vcore->reg = val;					\
	kvmhv_nestedv2_mark_dirty(vcpu, iden);				\
}

#define KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden)		\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0);		\
	return vcpu->arch.vcore->reg;					\
}

#define KVMPPC_BOOK3S_VCORE_ACCESSOR(reg, size, iden)			\
	KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden)		\
	KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden)		\

KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB)
KVMPPC_BOOK3S_VCORE_ACCESSOR(dpdes, 64, KVMPPC_GSID_DPDES)
KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR)
KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR)
KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(tb_offset, 64, KVMPPC_GSID_TB_OFFSET)

static inline u64 kvmppc_get_tb_offset(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vcore->tb_offset;
}

static inline u64 kvmppc_get_dec_expires(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB) < 0);
	return vcpu->arch.dec_expires;
}

static inline void kvmppc_set_dec_expires(struct kvm_vcpu *vcpu, u64 val)
{
	vcpu->arch.dec_expires = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB);
}

/* Expiry time of vcpu DEC relative to host TB */
static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu)
{
	return kvmppc_get_dec_expires(vcpu) - kvmppc_get_tb_offset(vcpu);
}
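
/*
 * Note (derivation, not in the original header): tb_offset is defined
 * above as guest timebase minus host timebase, so subtracting it from the
 * guest-TB expiry value yields the same instant in the host timebase.
 */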

static inline bool is_kvmppc_resume_guest(int r)
{
	return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
{
	/* Only PR KVM supports the magic page */
	return !is_kvmppc_hv_enabled(vcpu->kvm);
}

extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);

/* Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3			0x113724FA
#define OSI_SC_MAGIC_R4			0x77810F9B

#define INS_DCBZ			0x7c0007ec
/* TO = 31 for unconditional trap */
#define INS_TW				0x7fe00008

#define SPLIT_HACK_MASK			0xff000000
#define SPLIT_HACK_OFFS			0xfb000000

/*
 * This packs a VCPU ID from the [0..KVM_MAX_VCPU_IDS) space down to the
 * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
 * (but not its actual threading mode, which is not available) to avoid
 * collisions.
 *
 * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
 * 0) unchanged: if the guest is filling each VCORE completely then it will be
 * using consecutive IDs and it will fill the space without any packing.
 *
 * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
 * KVM_MAX_VCPUS (effectively masking off the top bits) and then an offset is
 * added to avoid collisions.
 *
 * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are only
 * possible if the guest is leaving at least 1/2 of each VCORE empty, so IDs
 * can be safely packed into the second half of each VCORE by adding an offset
 * of (stride / 2).
 *
 * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
 * (blocks 2 and 3) are seen, the guest must be leaving at least 3/4 of each
 * VCORE empty so packed IDs can be offset by (stride / 4) and (stride * 3 / 4).
 *
 * Finally, VCPU IDs from blocks 5..7 will only be seen if the guest is using a
 * stride of 8 and 1 thread per core so the remaining offsets of 1, 5, 3 and 7
 * must be free to use.
 *
 * (The offsets for each block are stored in block_offsets[], indexed by the
 * block number if the stride is 8. For cases where the guest's stride is less
 * than 8, we can re-use the block_offsets array by multiplying the block
 * number by (MAX_SMT_THREADS / stride) to reach the correct entry.)
 */
static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
{
	const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
	int stride = kvm->arch.emul_smt_mode;
	int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
	u32 packed_id;

	if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
		return 0;
	packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
	if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
		return 0;
	return packed_id;
}
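
/*
 * Worked example (illustrative; assumes KVM_MAX_VCPUS == 2048 and
 * MAX_SMT_THREADS == 8): with emul_smt_mode == 4, id == 2050 gives
 * block = (2050 / 2048) * (8 / 4) = 2 and block_offsets[2] == 2, so
 * the packed ID is (2050 % 2048) + 2 == 4.
 */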

#endif /* __ASM_KVM_BOOK3S_H__ */