/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001-2012  The Bochs Project
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
/////////////////////////////////////////////////////////////////////////
#include "qemu/osdep.h"

#include "qemu-common.h"
#include "x86_decode.h"
#include "x86.h"
#include "x86_emu.h"
#include "x86_mmu.h"
#include "x86_flags.h"
#include "vmcs.h"
#include "vmx.h"
void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data,
                   int direction, int size, uint32_t count);

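/*
 * EXEC_2OP_*_CMD expand to the shared body of a two-operand instruction:
 * fetch both operands, apply "cmd" at the decoded operand size (1, 2 or
 * 4 bytes), optionally write the result back, and update the lazy flags.
 * The LOGIC flag helpers take only the result; the ARITH helpers also
 * need both source values to derive carry and overflow.
 */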
#define EXEC_2OP_LOGIC_CMD(env, decode, cmd, FLAGS_FUNC, save_res) \
{                                                       \
    fetch_operands(env, decode, 2, true, true, false);  \
    switch (decode->operand_size) {                     \
    case 1:                                             \
    {                                                   \
        uint8_t v1 = (uint8_t)decode->op[0].val;        \
        uint8_t v2 = (uint8_t)decode->op[1].val;        \
        uint8_t diff = v1 cmd v2;                       \
        if (save_res) {                                 \
            write_val_ext(env, decode->op[0].ptr, diff, 1); \
        }                                               \
        FLAGS_FUNC##_8(diff);                           \
        break;                                          \
    }                                                   \
    case 2:                                             \
    {                                                   \
        uint16_t v1 = (uint16_t)decode->op[0].val;      \
        uint16_t v2 = (uint16_t)decode->op[1].val;      \
        uint16_t diff = v1 cmd v2;                      \
        if (save_res) {                                 \
            write_val_ext(env, decode->op[0].ptr, diff, 2); \
        }                                               \
        FLAGS_FUNC##_16(diff);                          \
        break;                                          \
    }                                                   \
    case 4:                                             \
    {                                                   \
        uint32_t v1 = (uint32_t)decode->op[0].val;      \
        uint32_t v2 = (uint32_t)decode->op[1].val;      \
        uint32_t diff = v1 cmd v2;                      \
        if (save_res) {                                 \
            write_val_ext(env, decode->op[0].ptr, diff, 4); \
        }                                               \
        FLAGS_FUNC##_32(diff);                          \
        break;                                          \
    }                                                   \
    default:                                            \
        VM_PANIC("bad size\n");                         \
    }                                                   \
}

#define EXEC_2OP_ARITH_CMD(env, decode, cmd, FLAGS_FUNC, save_res) \
{                                                       \
    fetch_operands(env, decode, 2, true, true, false);  \
    switch (decode->operand_size) {                     \
    case 1:                                             \
    {                                                   \
        uint8_t v1 = (uint8_t)decode->op[0].val;        \
        uint8_t v2 = (uint8_t)decode->op[1].val;        \
        uint8_t diff = v1 cmd v2;                       \
        if (save_res) {                                 \
            write_val_ext(env, decode->op[0].ptr, diff, 1); \
        }                                               \
        FLAGS_FUNC##_8(v1, v2, diff);                   \
        break;                                          \
    }                                                   \
    case 2:                                             \
    {                                                   \
        uint16_t v1 = (uint16_t)decode->op[0].val;      \
        uint16_t v2 = (uint16_t)decode->op[1].val;      \
        uint16_t diff = v1 cmd v2;                      \
        if (save_res) {                                 \
            write_val_ext(env, decode->op[0].ptr, diff, 2); \
        }                                               \
        FLAGS_FUNC##_16(v1, v2, diff);                  \
        break;                                          \
    }                                                   \
    case 4:                                             \
    {                                                   \
        uint32_t v1 = (uint32_t)decode->op[0].val;      \
        uint32_t v2 = (uint32_t)decode->op[1].val;      \
        uint32_t diff = v1 cmd v2;                      \
        if (save_res) {                                 \
            write_val_ext(env, decode->op[0].ptr, diff, 4); \
        }                                               \
        FLAGS_FUNC##_32(v1, v2, diff);                  \
        break;                                          \
    }                                                   \
    default:                                            \
        VM_PANIC("bad size\n");                         \
    }                                                   \
}

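/*
 * The register accessors below operate on the emulator's cached register
 * file (env->hvf_emul->regs), not on the live vCPU state; load_regs() and
 * store_regs() synchronize the two. The lx/rx/erx/rrx members select the
 * low-byte, 16-bit, 32-bit and 64-bit views of a GPR.
 */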
addr_t read_reg(CPUX86State *env, int reg, int size)
{
    switch (size) {
    case 1:
        return env->hvf_emul->regs[reg].lx;
    case 2:
        return env->hvf_emul->regs[reg].rx;
    case 4:
        return env->hvf_emul->regs[reg].erx;
    case 8:
        return env->hvf_emul->regs[reg].rrx;
    default:
        VM_PANIC_ON("read_reg size");
    }
    return 0;
}

void write_reg(CPUX86State *env, int reg, addr_t val, int size)
{
    switch (size) {
    case 1:
        env->hvf_emul->regs[reg].lx = val;
        break;
    case 2:
        env->hvf_emul->regs[reg].rx = val;
        break;
    case 4:
        /* 32-bit writes zero-extend into the full 64-bit register */
        env->hvf_emul->regs[reg].rrx = (uint32_t)val;
        break;
    case 8:
        env->hvf_emul->regs[reg].rrx = val;
        break;
    default:
        VM_PANIC_ON("write_reg size");
    }
}

addr_t read_val_from_reg(addr_t reg_ptr, int size)
{
    addr_t val;

    switch (size) {
    case 1:
        val = *(uint8_t *)reg_ptr;
        break;
    case 2:
        val = *(uint16_t *)reg_ptr;
        break;
    case 4:
        val = *(uint32_t *)reg_ptr;
        break;
    case 8:
        val = *(uint64_t *)reg_ptr;
        break;
    default:
        VM_PANIC_ON_EX(1, "read_val: Unknown size %d\n", size);
        break;
    }
    return val;
}

void write_val_to_reg(addr_t reg_ptr, addr_t val, int size)
{
    switch (size) {
    case 1:
        *(uint8_t *)reg_ptr = val;
        break;
    case 2:
        *(uint16_t *)reg_ptr = val;
        break;
    case 4:
        /* zero-extend into the full 64-bit register slot */
        *(uint64_t *)reg_ptr = (uint32_t)val;
        break;
    case 8:
        *(uint64_t *)reg_ptr = val;
        break;
    default:
        VM_PANIC("write_val: Unknown size\n");
        break;
    }
}

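/*
 * Operand pointers produced by the decoder are either host pointers into
 * the cached register file or guest linear addresses. is_host_reg() tells
 * the two apart so read_val_ext()/write_val_ext() can dispatch to a direct
 * register access or a VM memory access.
 */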
static bool is_host_reg(struct CPUX86State *env, addr_t ptr)
{
    return (ptr - (addr_t)&env->hvf_emul->regs[0]) <
           sizeof(env->hvf_emul->regs);
}

void write_val_ext(struct CPUX86State *env, addr_t ptr, addr_t val, int size)
{
    if (is_host_reg(env, ptr)) {
        write_val_to_reg(ptr, val, size);
        return;
    }
    vmx_write_mem(ENV_GET_CPU(env), ptr, &val, size);
}

uint8_t *read_mmio(struct CPUX86State *env, addr_t ptr, int bytes)
{
    vmx_read_mem(ENV_GET_CPU(env), env->hvf_emul->mmio_buf, ptr, bytes);
    return env->hvf_emul->mmio_buf;
}

addr_t read_val_ext(struct CPUX86State *env, addr_t ptr, int size)
{
    addr_t val = 0;
    uint8_t *mmio_ptr;

    if (is_host_reg(env, ptr)) {
        return read_val_from_reg(ptr, size);
    }

    mmio_ptr = read_mmio(env, ptr, size);
    switch (size) {
    case 1:
        val = *(uint8_t *)mmio_ptr;
        break;
    case 2:
        val = *(uint16_t *)mmio_ptr;
        break;
    case 4:
        val = *(uint32_t *)mmio_ptr;
        break;
    case 8:
        val = *(uint64_t *)mmio_ptr;
        break;
    default:
        VM_PANIC("bad size\n");
        break;
    }
    return val;
}

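/*
 * Resolve up to n decoded operands: compute each operand's pointer
 * (register file entry or guest linear address) and, when the matching
 * val_opN flag is set, load its current value at the decoded operand size.
 */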
static void fetch_operands(struct CPUX86State *env, struct x86_decode *decode,
                           int n, bool val_op0, bool val_op1, bool val_op2)
{
    int i;
    bool calc_val[3] = {val_op0, val_op1, val_op2};

    for (i = 0; i < n; i++) {
        switch (decode->op[i].type) {
        case X86_VAR_IMMEDIATE:
            break;
        case X86_VAR_REG:
            VM_PANIC_ON(!decode->op[i].ptr);
            if (calc_val[i]) {
                decode->op[i].val = read_val_from_reg(decode->op[i].ptr,
                                                      decode->operand_size);
            }
            break;
        case X86_VAR_RM:
            calc_modrm_operand(env, decode, &decode->op[i]);
            if (calc_val[i]) {
                decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
                                                 decode->operand_size);
            }
            break;
        case X86_VAR_OFFSET:
            decode->op[i].ptr = decode_linear_addr(env, decode,
                                                   decode->op[i].ptr,
                                                   REG_SEG_DS);
            if (calc_val[i]) {
                decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
                                                 decode->operand_size);
            }
            break;
        default:
            break;
        }
    }
}

static void exec_mov(struct CPUX86State *env, struct x86_decode *decode)
{
    fetch_operands(env, decode, 2, false, true, false);
    write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
                  decode->operand_size);

    RIP(env) += decode->len;
}

static void exec_add(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_ARITH_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
    RIP(env) += decode->len;
}

static void exec_or(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_LOGIC_CMD(env, decode, |, SET_FLAGS_OSZAPC_LOGIC, true);
    RIP(env) += decode->len;
}

static void exec_adc(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_ARITH_CMD(env, decode, +get_CF(env)+, SET_FLAGS_OSZAPC_ADD, true);
    RIP(env) += decode->len;
}

static void exec_sbb(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_ARITH_CMD(env, decode, -get_CF(env)-, SET_FLAGS_OSZAPC_SUB, true);
    RIP(env) += decode->len;
}

static void exec_and(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_LOGIC_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, true);
    RIP(env) += decode->len;
}

static void exec_sub(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_ARITH_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, true);
    RIP(env) += decode->len;
}

static void exec_xor(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_LOGIC_CMD(env, decode, ^, SET_FLAGS_OSZAPC_LOGIC, true);
    RIP(env) += decode->len;
}

static void exec_neg(struct CPUX86State *env, struct x86_decode *decode)
{
    /*EXEC_2OP_ARITH_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);*/
    int32_t val;
    fetch_operands(env, decode, 2, true, true, false);

    val = 0 - sign(decode->op[1].val, decode->operand_size);
    write_val_ext(env, decode->op[1].ptr, val, decode->operand_size);

    if (4 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB_32(0, 0 - val, val);
    } else if (2 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB_16(0, 0 - val, val);
    } else if (1 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB_8(0, 0 - val, val);
    } else {
        VM_PANIC("bad op size\n");
    }

    /*lflags_to_rflags(env);*/
    RIP(env) += decode->len;
}

static void exec_cmp(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_ARITH_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
    RIP(env) += decode->len;
}

static void exec_inc(struct CPUX86State *env, struct x86_decode *decode)
{
    decode->op[1].type = X86_VAR_IMMEDIATE;
    decode->op[1].val = 0;

    EXEC_2OP_ARITH_CMD(env, decode, +1+, SET_FLAGS_OSZAP_ADD, true);

    RIP(env) += decode->len;
}

static void exec_dec(struct CPUX86State *env, struct x86_decode *decode)
{
    decode->op[1].type = X86_VAR_IMMEDIATE;
    decode->op[1].val = 0;

    EXEC_2OP_ARITH_CMD(env, decode, -1-, SET_FLAGS_OSZAP_SUB, true);

    RIP(env) += decode->len;
}

static void exec_tst(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_LOGIC_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, false);
    RIP(env) += decode->len;
}

static void exec_not(struct CPUX86State *env, struct x86_decode *decode)
{
    fetch_operands(env, decode, 1, true, false, false);

    write_val_ext(env, decode->op[0].ptr, ~decode->op[0].val,
                  decode->operand_size);
    RIP(env) += decode->len;
}

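/*
 * MOVZX reads its source at a narrower size than the destination, so the
 * decoded operand size is temporarily overridden while resolving the ModRM
 * operand: opcode 0f b6 moves a byte, 0f b7 a word.
 */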
void exec_movzx(struct CPUX86State *env, struct x86_decode *decode)
{
    int src_op_size;
    int op_size = decode->operand_size;

    fetch_operands(env, decode, 1, false, false, false);

    if (0xb6 == decode->opcode[1]) {
        src_op_size = 1;
    } else {
        src_op_size = 2;
    }
    decode->operand_size = src_op_size;
    calc_modrm_operand(env, decode, &decode->op[1]);
    decode->op[1].val = read_val_ext(env, decode->op[1].ptr, src_op_size);
    write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);

    RIP(env) += decode->len;
}

static void exec_out(struct CPUX86State *env, struct x86_decode *decode)
{
    switch (decode->opcode[0]) {
    case 0xe6:
        hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &AL(env), 1, 1, 1);
        break;
    case 0xe7:
        hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &RAX(env), 1,
                      decode->operand_size, 1);
        break;
    case 0xee:
        hvf_handle_io(ENV_GET_CPU(env), DX(env), &AL(env), 1, 1, 1);
        break;
    case 0xef:
        hvf_handle_io(ENV_GET_CPU(env), DX(env), &RAX(env), 1,
                      decode->operand_size, 1);
        break;
    default:
        VM_PANIC("Bad out opcode\n");
        break;
    }
    RIP(env) += decode->len;
}

static void exec_in(struct CPUX86State *env, struct x86_decode *decode)
{
    addr_t val = 0;

    switch (decode->opcode[0]) {
    case 0xe4:
        hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &AL(env), 0, 1, 1);
        break;
    case 0xe5:
        hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &val, 0,
                      decode->operand_size, 1);
        if (decode->operand_size == 2) {
            AX(env) = val;
        } else {
            RAX(env) = (uint32_t)val;
        }
        break;
    case 0xec:
        hvf_handle_io(ENV_GET_CPU(env), DX(env), &AL(env), 0, 1, 1);
        break;
    case 0xed:
        hvf_handle_io(ENV_GET_CPU(env), DX(env), &val, 0,
                      decode->operand_size, 1);
        if (decode->operand_size == 2) {
            AX(env) = val;
        } else {
            RAX(env) = (uint32_t)val;
        }
        break;
    default:
        VM_PANIC("Bad in opcode\n");
        break;
    }

    RIP(env) += decode->len;
}

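/*
 * String instruction helpers. string_increment_reg() advances RSI/RDI by
 * the operand size, backwards when RFLAGS.DF is set; string_rep() runs one
 * iteration per RCX count and honors the ZF termination rules of the
 * REP/REPNZ prefixes used by CMPS and SCAS.
 */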
static inline void string_increment_reg(struct CPUX86State *env, int reg,
                                        struct x86_decode *decode)
{
    addr_t val = read_reg(env, reg, decode->addressing_size);
    if (env->hvf_emul->rflags.df) {
        val -= decode->operand_size;
    } else {
        val += decode->operand_size;
    }
    write_reg(env, reg, val, decode->addressing_size);
}

static inline void string_rep(struct CPUX86State *env,
                              struct x86_decode *decode,
                              void (*func)(struct CPUX86State *env,
                                           struct x86_decode *ins), int rep)
{
    addr_t rcx = read_reg(env, REG_RCX, decode->addressing_size);
    while (rcx--) {
        func(env, decode);
        write_reg(env, REG_RCX, rcx, decode->addressing_size);
        if ((PREFIX_REP == rep) && !get_ZF(env)) {
            break;
        }
        if ((PREFIX_REPN == rep) && get_ZF(env)) {
            break;
        }
    }
}

static void exec_ins_single(struct CPUX86State *env, struct x86_decode *decode)
{
    addr_t addr = linear_addr_size(ENV_GET_CPU(env), RDI(env),
                                   decode->addressing_size, REG_SEG_ES);

    hvf_handle_io(ENV_GET_CPU(env), DX(env), env->hvf_emul->mmio_buf, 0,
                  decode->operand_size, 1);
    vmx_write_mem(ENV_GET_CPU(env), addr, env->hvf_emul->mmio_buf,
                  decode->operand_size);

    string_increment_reg(env, REG_RDI, decode);
}

static void exec_ins(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_ins_single, 0);
    } else {
        exec_ins_single(env, decode);
    }

    RIP(env) += decode->len;
}

static void exec_outs_single(struct CPUX86State *env,
                             struct x86_decode *decode)
{
    addr_t addr = decode_linear_addr(env, decode, RSI(env), REG_SEG_DS);

    vmx_read_mem(ENV_GET_CPU(env), env->hvf_emul->mmio_buf, addr,
                 decode->operand_size);
    hvf_handle_io(ENV_GET_CPU(env), DX(env), env->hvf_emul->mmio_buf, 1,
                  decode->operand_size, 1);

    string_increment_reg(env, REG_RSI, decode);
}

static void exec_outs(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_outs_single, 0);
    } else {
        exec_outs_single(env, decode);
    }

    RIP(env) += decode->len;
}

static void exec_movs_single(struct CPUX86State *env,
                             struct x86_decode *decode)
{
    addr_t src_addr;
    addr_t dst_addr;
    addr_t val;

    src_addr = decode_linear_addr(env, decode, RSI(env), REG_SEG_DS);
    dst_addr = linear_addr_size(ENV_GET_CPU(env), RDI(env),
                                decode->addressing_size, REG_SEG_ES);

    val = read_val_ext(env, src_addr, decode->operand_size);
    write_val_ext(env, dst_addr, val, decode->operand_size);

    string_increment_reg(env, REG_RSI, decode);
    string_increment_reg(env, REG_RDI, decode);
}

static void exec_movs(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_movs_single, 0);
    } else {
        exec_movs_single(env, decode);
    }

    RIP(env) += decode->len;
}

static void exec_cmps_single(struct CPUX86State *env,
                             struct x86_decode *decode)
{
    addr_t src_addr;
    addr_t dst_addr;

    src_addr = decode_linear_addr(env, decode, RSI(env), REG_SEG_DS);
    dst_addr = linear_addr_size(ENV_GET_CPU(env), RDI(env),
                                decode->addressing_size, REG_SEG_ES);

    decode->op[0].type = X86_VAR_IMMEDIATE;
    decode->op[0].val = read_val_ext(env, src_addr, decode->operand_size);
    decode->op[1].type = X86_VAR_IMMEDIATE;
    decode->op[1].val = read_val_ext(env, dst_addr, decode->operand_size);

    EXEC_2OP_ARITH_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);

    string_increment_reg(env, REG_RSI, decode);
    string_increment_reg(env, REG_RDI, decode);
}

static void exec_cmps(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_cmps_single, decode->rep);
    } else {
        exec_cmps_single(env, decode);
    }

    RIP(env) += decode->len;
}

static void exec_stos_single(struct CPUX86State *env,
                             struct x86_decode *decode)
{
    addr_t addr;
    addr_t val;

    addr = linear_addr_size(ENV_GET_CPU(env), RDI(env),
                            decode->addressing_size, REG_SEG_ES);
    val = read_reg(env, REG_RAX, decode->operand_size);
    vmx_write_mem(ENV_GET_CPU(env), addr, &val, decode->operand_size);

    string_increment_reg(env, REG_RDI, decode);
}

static void exec_stos(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_stos_single, 0);
    } else {
        exec_stos_single(env, decode);
    }

    RIP(env) += decode->len;
}

static void exec_scas_single(struct CPUX86State *env,
                             struct x86_decode *decode)
{
    addr_t addr;

    addr = linear_addr_size(ENV_GET_CPU(env), RDI(env),
                            decode->addressing_size, REG_SEG_ES);
    decode->op[1].type = X86_VAR_IMMEDIATE;
    vmx_read_mem(ENV_GET_CPU(env), &decode->op[1].val, addr,
                 decode->operand_size);

    EXEC_2OP_ARITH_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
    string_increment_reg(env, REG_RDI, decode);
}

static void exec_scas(struct CPUX86State *env, struct x86_decode *decode)
{
    decode->op[0].type = X86_VAR_REG;
    decode->op[0].reg = REG_RAX;
    if (decode->rep) {
        string_rep(env, decode, exec_scas_single, decode->rep);
    } else {
        exec_scas_single(env, decode);
    }

    RIP(env) += decode->len;
}

static void exec_lods_single(struct CPUX86State *env,
                             struct x86_decode *decode)
{
    addr_t addr;
    addr_t val = 0;

    addr = decode_linear_addr(env, decode, RSI(env), REG_SEG_DS);
    vmx_read_mem(ENV_GET_CPU(env), &val, addr, decode->operand_size);
    write_reg(env, REG_RAX, val, decode->operand_size);

    string_increment_reg(env, REG_RSI, decode);
}

static void exec_lods(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_lods_single, 0);
    } else {
        exec_lods_single(env, decode);
    }

    RIP(env) += decode->len;
}

#define MSR_IA32_UCODE_REV 0x00000017

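/*
 * RDMSR emulation: the MSR index comes from ECX and the 64-bit result is
 * returned in EDX:EAX. Values are taken from the VMCS where the MSR is
 * shadowed there (TSC offset, EFER, FS/GS base), otherwise from the cached
 * env state; unhandled MSRs read as 0.
 */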
void simulate_rdmsr(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    uint32_t msr = ECX(env);
    uint64_t val = 0;

    switch (msr) {
    case MSR_IA32_TSC:
        val = rdtscp() + rvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET);
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(X86_CPU(cpu)->apic_state);
        break;
    case MSR_IA32_UCODE_REV:
        val = (0x100000000ULL << 32) | 0x100000000ULL;
        break;
    case MSR_EFER:
        val = rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER);
        break;
    case MSR_FSBASE:
        val = rvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE);
        break;
    case MSR_GSBASE:
        val = rvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE);
        break;
    case MSR_KERNELGSBASE:
        val = rvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE);
        break;
    case MSR_STAR:
        abort();
        break;
    case MSR_LSTAR:
        abort();
        break;
    case MSR_CSTAR:
        abort();
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    default:
        /* fprintf(stderr, "%s: unknown msr 0x%x\n", __func__, msr); */
        break;
    }

    RAX(env) = (uint32_t)val;
    RDX(env) = (uint32_t)(val >> 32);
}

static void exec_rdmsr(struct CPUX86State *env, struct x86_decode *decode)
{
    simulate_rdmsr(ENV_GET_CPU(env));
    RIP(env) += decode->len;
}

void simulate_wrmsr(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    uint32_t msr = ECX(env);
    uint64_t data = ((uint64_t)EDX(env) << 32) | EAX(env);

    switch (msr) {
    case MSR_IA32_TSC:
        /* if (!osx_is_sierra())
             wvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET, data - rdtscp());
           hv_vm_sync_tsc(data);*/
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(X86_CPU(cpu)->apic_state, data);
        break;
    case MSR_FSBASE:
        wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, data);
        break;
    case MSR_GSBASE:
        wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, data);
        break;
    case MSR_KERNELGSBASE:
        wvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE, data);
        break;
    case MSR_STAR:
        abort();
        break;
    case MSR_LSTAR:
        abort();
        break;
    case MSR_CSTAR:
        abort();
        break;
    case MSR_EFER:
        env->hvf_emul->efer.efer = data;
        /*printf("new efer %llx\n", EFER(cpu));*/
        wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, data);
        if (data & EFER_NXE) {
            hv_vcpu_invalidate_tlb(cpu->hvf_fd);
        }
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base = data;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask = data;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix64K_00000] = data;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1] = data;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3] = data;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = data;
        break;
    default:
        break;
    }

    /* Related to support known hypervisor interface */
    /* if (g_hypervisor_iface)
         g_hypervisor_iface->wrmsr_handler(cpu, msr, data);

    printf("write msr %llx\n", RCX(cpu));*/
}

static void exec_wrmsr(struct CPUX86State *env, struct x86_decode *decode)
{
    simulate_wrmsr(ENV_GET_CPU(env));
    RIP(env) += decode->len;
}

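/*
 * Common helper for the BT/BTC/BTS/BTR family. For memory operands the bit
 * offset may fall outside the addressed word, so the effective address is
 * first displaced by the signed bit offset divided by the operand width;
 * the tested bit always lands in CF.
 */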
/*
 * flag:
 * 0 - bt, 1 - btc, 2 - bts, 3 - btr
 */
static void do_bt(struct CPUX86State *env, struct x86_decode *decode, int flag)
{
    int32_t displacement;
    uint8_t index;
    bool cf;
    int mask = (4 == decode->operand_size) ? 0x1f : 0xf;

    VM_PANIC_ON(decode->rex.rex);

    fetch_operands(env, decode, 2, false, true, false);
    index = decode->op[1].val & mask;

    if (decode->op[0].type != X86_VAR_REG) {
        if (4 == decode->operand_size) {
            displacement = ((int32_t) (decode->op[1].val & 0xffffffe0)) / 32;
            decode->op[0].ptr += 4 * displacement;
        } else if (2 == decode->operand_size) {
            displacement = ((int16_t) (decode->op[1].val & 0xfff0)) / 16;
            decode->op[0].ptr += 2 * displacement;
        } else {
            VM_PANIC("bt 64bit\n");
        }
    }
    decode->op[0].val = read_val_ext(env, decode->op[0].ptr,
                                     decode->operand_size);
    cf = (decode->op[0].val >> index) & 0x01;

    switch (flag) {
    case 0: /* bt: test only */
        set_CF(env, cf);
        return;
    case 1: /* btc: complement the tested bit */
        decode->op[0].val ^= (1u << index);
        break;
    case 2: /* bts: set the tested bit */
        decode->op[0].val |= (1u << index);
        break;
    case 3: /* btr: reset the tested bit */
        decode->op[0].val &= ~(1u << index);
        break;
    }
    write_val_ext(env, decode->op[0].ptr, decode->op[0].val,
                  decode->operand_size);
    set_CF(env, cf);
}

static void exec_bt(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 0);
    RIP(env) += decode->len;
}

static void exec_btc(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 1);
    RIP(env) += decode->len;
}

static void exec_btr(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 3);
    RIP(env) += decode->len;
}

static void exec_bts(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 2);
    RIP(env) += decode->len;
}

void exec_shl(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;
    int of = 0, cf = 0;

    fetch_operands(env, decode, 2, true, true, false);

    count = decode->op[1].val;
    count &= 0x1f;      /* count is masked to 5 bits */
    if (!count) {
        goto exit;
    }

    switch (decode->operand_size) {
    case 1:
    {
        uint8_t res = 0;
        if (count <= 8) {
            res = (decode->op[0].val << count);
            cf = (decode->op[0].val >> (8 - count)) & 0x1;
            of = cf ^ (res >> 7);
        }

        write_val_ext(env, decode->op[0].ptr, res, 1);
        SET_FLAGS_OSZAPC_LOGIC_8(res);
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 2:
    {
        uint16_t res = 0;

        if (count <= 16) {
            res = (decode->op[0].val << count);
            cf = (decode->op[0].val >> (16 - count)) & 0x1;
            of = cf ^ (res >> 15); /* of = cf ^ result15 */
        }

        write_val_ext(env, decode->op[0].ptr, res, 2);
        SET_FLAGS_OSZAPC_LOGIC_16(res);
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 4:
    {
        uint32_t res = decode->op[0].val << count;

        write_val_ext(env, decode->op[0].ptr, res, 4);
        SET_FLAGS_OSZAPC_LOGIC_32(res);
        cf = (decode->op[0].val >> (32 - count)) & 0x1;
        of = cf ^ (res >> 31); /* of = cf ^ result31 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    default:
        break;
    }

exit:
    /* lflags_to_rflags(env); */
    RIP(env) += decode->len;
}

void exec_movsx(CPUX86State *env, struct x86_decode *decode)
{
    int src_op_size;
    int op_size = decode->operand_size;

    fetch_operands(env, decode, 2, false, false, false);

    if (0xbe == decode->opcode[1]) {
        src_op_size = 1;
    } else {
        src_op_size = 2;
    }

    decode->operand_size = src_op_size;
    calc_modrm_operand(env, decode, &decode->op[1]);
    decode->op[1].val = sign(read_val_ext(env, decode->op[1].ptr, src_op_size),
                             src_op_size);

    write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);

    RIP(env) += decode->len;
}

void exec_ror(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val;

    switch (decode->operand_size) {
    case 1:
    {
        uint32_t bit6, bit7;
        uint8_t res;

        if ((count & 0x07) == 0) {
            if (count & 0x18) {
                bit6 = ((uint8_t)decode->op[0].val >> 6) & 1;
                bit7 = ((uint8_t)decode->op[0].val >> 7) & 1;
                SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
            }
        } else {
            count &= 0x7; /* use only bottom 3 bits */
            res = ((uint8_t)decode->op[0].val >> count) |
                  ((uint8_t)decode->op[0].val << (8 - count));
            write_val_ext(env, decode->op[0].ptr, res, 1);
            bit6 = (res >> 6) & 1;
            bit7 = (res >> 7) & 1;
            /* set eflags: ROR count affects the following flags: C, O */
            SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
        }
        break;
    }
    case 2:
    {
        uint32_t bit14, bit15;
        uint16_t res;

        if ((count & 0x0f) == 0) {
            if (count & 0x10) {
                bit14 = ((uint16_t)decode->op[0].val >> 14) & 1;
                bit15 = ((uint16_t)decode->op[0].val >> 15) & 1;
                /* of = result14 ^ result15 */
                SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
            }
        } else {
            count &= 0x0f; /* use only 4 LSB's */
            res = ((uint16_t)decode->op[0].val >> count) |
                  ((uint16_t)decode->op[0].val << (16 - count));
            write_val_ext(env, decode->op[0].ptr, res, 2);

            bit14 = (res >> 14) & 1;
            bit15 = (res >> 15) & 1;
            /* of = result14 ^ result15 */
            SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
        }
        break;
    }
    case 4:
    {
        uint32_t bit31, bit30;
        uint32_t res;

        count &= 0x1f;
        if (count) {
            res = ((uint32_t)decode->op[0].val >> count) |
                  ((uint32_t)decode->op[0].val << (32 - count));
            write_val_ext(env, decode->op[0].ptr, res, 4);

            bit31 = (res >> 31) & 1;
            bit30 = (res >> 30) & 1;
            /* of = result30 ^ result31 */
            SET_FLAGS_OxxxxC(env, bit30 ^ bit31, bit31);
        }
        break;
    }
    }
    RIP(env) += decode->len;
}

void exec_rol(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val;

    switch (decode->operand_size) {
    case 1:
    {
        uint32_t bit0, bit7;
        uint8_t res;

        if ((count & 0x07) == 0) {
            if (count & 0x18) {
                bit0 = ((uint8_t)decode->op[0].val & 1);
                bit7 = ((uint8_t)decode->op[0].val >> 7);
                SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
            }
        } else {
            count &= 0x7; /* use only lowest 3 bits */
            res = ((uint8_t)decode->op[0].val << count) |
                  ((uint8_t)decode->op[0].val >> (8 - count));

            write_val_ext(env, decode->op[0].ptr, res, 1);
            /* set eflags:
             * ROL count affects the following flags: C, O
             */
            bit0 = (res & 1);
            bit7 = (res >> 7);
            SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
        }
        break;
    }
    case 2:
    {
        uint32_t bit0, bit15;
        uint16_t res;

        if ((count & 0x0f) == 0) {
            if (count & 0x10) {
                bit0 = ((uint16_t)decode->op[0].val & 0x1);
                bit15 = ((uint16_t)decode->op[0].val >> 15);
                /* of = cf ^ result15 */
                SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
            }
        } else {
            count &= 0x0f; /* only use bottom 4 bits */
            res = ((uint16_t)decode->op[0].val << count) |
                  ((uint16_t)decode->op[0].val >> (16 - count));

            write_val_ext(env, decode->op[0].ptr, res, 2);
            bit0 = (res & 0x1);
            bit15 = (res >> 15);
            /* of = cf ^ result15 */
            SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
        }
        break;
    }
    case 4:
    {
        uint32_t bit0, bit31;
        uint32_t res;

        count &= 0x1f;
        if (count) {
            res = ((uint32_t)decode->op[0].val << count) |
                  ((uint32_t)decode->op[0].val >> (32 - count));

            write_val_ext(env, decode->op[0].ptr, res, 4);
            bit0 = (res & 0x1);
            bit31 = (res >> 31);
            /* of = cf ^ result31 */
            SET_FLAGS_OxxxxC(env, bit0 ^ bit31, bit0);
        }
        break;
    }
    }
    RIP(env) += decode->len;
}

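/*
 * RCL/RCR rotate through the carry flag, i.e. a 9-, 17- or 33-bit rotation
 * depending on operand size; this is why the counts below are reduced
 * modulo 9 and 17 for the narrow cases.
 */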
void exec_rcl(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;
    int of = 0, cf = 0;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val & 0x1f;

    switch (decode->operand_size) {
    case 1:
    {
        uint8_t op1_8 = decode->op[0].val;
        uint8_t res;

        count %= 9;
        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_8 << 1) | get_CF(env);
        } else {
            res = (op1_8 << count) | (get_CF(env) << (count - 1)) |
                  (op1_8 >> (9 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 1);

        cf = (op1_8 >> (8 - count)) & 0x01;
        of = cf ^ (res >> 7); /* of = cf ^ result7 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 2:
    {
        uint16_t res;
        uint16_t op1_16 = decode->op[0].val;

        count %= 17;
        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_16 << 1) | get_CF(env);
        } else if (count == 16) {
            res = (get_CF(env) << 15) | (op1_16 >> 1);
        } else { /* 2..15 */
            res = (op1_16 << count) | (get_CF(env) << (count - 1)) |
                  (op1_16 >> (17 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 2);

        cf = (op1_16 >> (16 - count)) & 0x1;
        of = cf ^ (res >> 15); /* of = cf ^ result15 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 4:
    {
        uint32_t res;
        uint32_t op1_32 = decode->op[0].val;

        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_32 << 1) | get_CF(env);
        } else {
            res = (op1_32 << count) | (get_CF(env) << (count - 1)) |
                  (op1_32 >> (33 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 4);

        cf = (op1_32 >> (32 - count)) & 0x1;
        of = cf ^ (res >> 31); /* of = cf ^ result31 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    }
    RIP(env) += decode->len;
}

void exec_rcr(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;
    int of = 0, cf = 0;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val & 0x1f;

    switch (decode->operand_size) {
    case 1:
    {
        uint8_t op1_8 = decode->op[0].val;
        uint8_t res;

        count %= 9;
        if (!count) {
            break;
        }
        res = (op1_8 >> count) | (get_CF(env) << (8 - count)) |
              (op1_8 << (9 - count));

        write_val_ext(env, decode->op[0].ptr, res, 1);

        cf = (op1_8 >> (count - 1)) & 0x1;
        of = (((res << 1) ^ res) >> 7) & 0x1; /* of = result6 ^ result7 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 2:
    {
        uint16_t op1_16 = decode->op[0].val;
        uint16_t res;

        count %= 17;
        if (!count) {
            break;
        }
        res = (op1_16 >> count) | (get_CF(env) << (16 - count)) |
              (op1_16 << (17 - count));

        write_val_ext(env, decode->op[0].ptr, res, 2);

        cf = (op1_16 >> (count - 1)) & 0x1;
        of = ((uint16_t)((res << 1) ^ res) >> 15) & 0x1; /* of = result15 ^
                                                            result14 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 4:
    {
        uint32_t res;
        uint32_t op1_32 = decode->op[0].val;

        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_32 >> 1) | (get_CF(env) << 31);
        } else {
            res = (op1_32 >> count) | (get_CF(env) << (32 - count)) |
                  (op1_32 << (33 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 4);

        cf = (op1_32 >> (count - 1)) & 0x1;
        of = ((res << 1) ^ res) >> 31; /* of = result30 ^ result31 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    }
    RIP(env) += decode->len;
}

static void exec_xchg(struct CPUX86State *env, struct x86_decode *decode)
{
    fetch_operands(env, decode, 2, true, true, false);

    write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
                  decode->operand_size);
    write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
                  decode->operand_size);

    RIP(env) += decode->len;
}

static void exec_xadd(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_ARITH_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
    write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
                  decode->operand_size);

    RIP(env) += decode->len;
}

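/*
 * Dispatch table mapping decoded commands to handlers. It is flattened
 * into _cmd_handler (indexed by command) by init_cmd_handler(), so the
 * lookup in exec_instruction() is a single array access.
 */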
static struct cmd_handler {
    enum x86_decode_cmd cmd;
    void (*handler)(struct CPUX86State *env, struct x86_decode *ins);
} handlers[] = {
    {X86_DECODE_CMD_INVL, NULL,},
    {X86_DECODE_CMD_MOV, exec_mov},
    {X86_DECODE_CMD_ADD, exec_add},
    {X86_DECODE_CMD_OR, exec_or},
    {X86_DECODE_CMD_ADC, exec_adc},
    {X86_DECODE_CMD_SBB, exec_sbb},
    {X86_DECODE_CMD_AND, exec_and},
    {X86_DECODE_CMD_SUB, exec_sub},
    {X86_DECODE_CMD_NEG, exec_neg},
    {X86_DECODE_CMD_XOR, exec_xor},
    {X86_DECODE_CMD_CMP, exec_cmp},
    {X86_DECODE_CMD_INC, exec_inc},
    {X86_DECODE_CMD_DEC, exec_dec},
    {X86_DECODE_CMD_TST, exec_tst},
    {X86_DECODE_CMD_NOT, exec_not},
    {X86_DECODE_CMD_MOVZX, exec_movzx},
    {X86_DECODE_CMD_OUT, exec_out},
    {X86_DECODE_CMD_IN, exec_in},
    {X86_DECODE_CMD_INS, exec_ins},
    {X86_DECODE_CMD_OUTS, exec_outs},
    {X86_DECODE_CMD_RDMSR, exec_rdmsr},
    {X86_DECODE_CMD_WRMSR, exec_wrmsr},
    {X86_DECODE_CMD_BT, exec_bt},
    {X86_DECODE_CMD_BTR, exec_btr},
    {X86_DECODE_CMD_BTC, exec_btc},
    {X86_DECODE_CMD_BTS, exec_bts},
    {X86_DECODE_CMD_SHL, exec_shl},
    {X86_DECODE_CMD_ROL, exec_rol},
    {X86_DECODE_CMD_ROR, exec_ror},
    {X86_DECODE_CMD_RCR, exec_rcr},
    {X86_DECODE_CMD_RCL, exec_rcl},
    /*{X86_DECODE_CMD_CPUID, exec_cpuid},*/
    {X86_DECODE_CMD_MOVS, exec_movs},
    {X86_DECODE_CMD_CMPS, exec_cmps},
    {X86_DECODE_CMD_STOS, exec_stos},
    {X86_DECODE_CMD_SCAS, exec_scas},
    {X86_DECODE_CMD_LODS, exec_lods},
    {X86_DECODE_CMD_MOVSX, exec_movsx},
    {X86_DECODE_CMD_XCHG, exec_xchg},
    {X86_DECODE_CMD_XADD, exec_xadd},
};

static struct cmd_handler _cmd_handler[X86_DECODE_CMD_LAST];

static void init_cmd_handler()
{
    int i;
    for (i = 0; i < ARRAY_SIZE(handlers); i++) {
        _cmd_handler[handlers[i].cmd] = handlers[i];
    }
}

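/*
 * Synchronize the guest register state between the Hypervisor.framework
 * vCPU and the emulator's cache: load_regs() before emulating,
 * store_regs() afterwards. R8-R15 are accessed as HV_X86_RAX + i, which
 * relies on the HV_X86_* register enum following GPR encoding order.
 */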
void load_regs(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    int i = 0;
    RRX(env, REG_RAX) = rreg(cpu->hvf_fd, HV_X86_RAX);
    RRX(env, REG_RBX) = rreg(cpu->hvf_fd, HV_X86_RBX);
    RRX(env, REG_RCX) = rreg(cpu->hvf_fd, HV_X86_RCX);
    RRX(env, REG_RDX) = rreg(cpu->hvf_fd, HV_X86_RDX);
    RRX(env, REG_RSI) = rreg(cpu->hvf_fd, HV_X86_RSI);
    RRX(env, REG_RDI) = rreg(cpu->hvf_fd, HV_X86_RDI);
    RRX(env, REG_RSP) = rreg(cpu->hvf_fd, HV_X86_RSP);
    RRX(env, REG_RBP) = rreg(cpu->hvf_fd, HV_X86_RBP);
    for (i = 8; i < 16; i++) {
        RRX(env, i) = rreg(cpu->hvf_fd, HV_X86_RAX + i);
    }

    RFLAGS(env) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
    rflags_to_lflags(env);
    RIP(env) = rreg(cpu->hvf_fd, HV_X86_RIP);
}

void store_regs(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    int i = 0;
    wreg(cpu->hvf_fd, HV_X86_RAX, RAX(env));
    wreg(cpu->hvf_fd, HV_X86_RBX, RBX(env));
    wreg(cpu->hvf_fd, HV_X86_RCX, RCX(env));
    wreg(cpu->hvf_fd, HV_X86_RDX, RDX(env));
    wreg(cpu->hvf_fd, HV_X86_RSI, RSI(env));
    wreg(cpu->hvf_fd, HV_X86_RDI, RDI(env));
    wreg(cpu->hvf_fd, HV_X86_RBP, RBP(env));
    wreg(cpu->hvf_fd, HV_X86_RSP, RSP(env));
    for (i = 8; i < 16; i++) {
        wreg(cpu->hvf_fd, HV_X86_RAX + i, RRX(env, i));
    }

    lflags_to_rflags(env);
    wreg(cpu->hvf_fd, HV_X86_RFLAGS, RFLAGS(env));
    macvm_set_rip(cpu, RIP(env));
}

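/*
 * Emulate one decoded instruction. FPU emulation is not implemented (the
 * check is compiled out with "0 &&"); an unimplemented command is logged
 * and skipped by advancing RIP past the instruction.
 */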
bool exec_instruction(struct CPUX86State *env, struct x86_decode *ins)
{
    /*if (hvf_vcpu_id(cpu))
    printf("%d, %llx: exec_instruction %s\n", hvf_vcpu_id(cpu), RIP(cpu),
          decode_cmd_to_string(ins->cmd));*/

    if (0 && ins->is_fpu) {
        VM_PANIC("emulate fpu\n");
    } else {
        if (!_cmd_handler[ins->cmd].handler) {
            printf("Unimplemented handler (%llx) for %d (%x %x) \n", RIP(env),
                   ins->cmd, ins->opcode[0],
                   ins->opcode_len > 1 ? ins->opcode[1] : 0);
            RIP(env) += ins->len;
            return true;
        }

        VM_PANIC_ON_EX(!_cmd_handler[ins->cmd].handler,
                       "Unimplemented handler (%llx) for %d (%x %x) \n",
                       RIP(env), ins->cmd, ins->opcode[0],
                       ins->opcode_len > 1 ? ins->opcode[1] : 0);
        _cmd_handler[ins->cmd].handler(env, ins);
    }

    return true;
}

/* One-time setup: populate the command dispatch table. */
void init_emu(void)
{
    init_cmd_handler();
}