// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"
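/*
 * Facility checks used below: if the guest has the facility disabled in
 * its MSR, queue the corresponding "unavailable" interrupt and tell the
 * caller to stop emulating the access.
 */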
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */
#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */
/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	enum emulation_result emulated = EMULATE_FAIL;
	int advance = 1;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;
	/*
	 * if mmio_vsx_tx_sx_enabled == 0, copy data between
	 * VSR[0..31] and memory
	 * if mmio_vsx_tx_sx_enabled == 1, copy data between
	 * VSR[32..63] and memory
	 */
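	/* Reset the per-access MMIO emulation state before decoding. */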
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_vmx_offset = 0;
	vcpu->arch.mmio_host_swabbed = 0;
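	/*
	 * Decode the access with the common powerpc instruction analyser
	 * and dispatch on the type of load or store it describes.
	 */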
	emulated = EMULATE_FAIL;
	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);

		switch (type) {
		case LOAD: {
			int instr_byte_swap = op.type & BYTEREV;
			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(run, vcpu,
						op.reg, size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
		}
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
					KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case LOAD_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (size == 16) { /* lvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* lvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* lvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* lvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;
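			/*
			 * mmio_vmx_offset is the element index within the
			 * 16-byte VMX register, taken from the low bits of
			 * the guest effective address.
			 */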
			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_load(run,
						vcpu, KVM_MMIO_REG_VMX|op.reg,
						8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_load(run, vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						size, 1);
			}
			break;
#endif
#ifdef CONFIG_VSX
		case LOAD_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else
				break;

			if (size < op.element_size) {
				/* precision convert case: lxsspx, etc */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|op.reg, io_size_each,
					1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE:
			/* if need byte reverse, op.val has been reversed by
			 * analyse_instr().
			 */
			emulated = kvmppc_handle_store(run, vcpu, op.val,
					size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/* The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read actual FP vals
			 * from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case STORE_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses. */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VEC);
			if (size == 16) { /* stvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* stvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* stvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* stvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, 8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

			if (size < op.element_size) {
				/* precise conversion case, like stxsspx */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
						size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(run, vcpu,
					op.reg, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}
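	/*
	 * Anything that could not be emulated is reflected to the guest as
	 * a program interrupt, and the PC is left at the offending
	 * instruction.
	 */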
	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}